author    André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 04:34:46 -0300
committer André Fabian Silva Delgado <emulatorman@parabola.nu>  2016-09-11 04:34:46 -0300
commit    863981e96738983919de841ec669e157e6bdaeb0 (patch)
tree      d6d89a12e7eb8017837c057935a2271290907f76 /drivers/net
parent    8dec7c70575785729a6a9e6719a955e9c545bcab (diff)
Linux-libre 4.7.1-gnupck-4.7.1-gnu
Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/Kconfig | 17
-rw-r--r-- drivers/net/Makefile | 1
-rw-r--r-- drivers/net/Space.c | 21
-rw-r--r-- drivers/net/appletalk/cops.c | 2
-rw-r--r-- drivers/net/arcnet/com90xx.c | 2
-rw-r--r-- drivers/net/bonding/bond_3ad.c | 75
-rw-r--r-- drivers/net/bonding/bond_alb.c | 7
-rw-r--r-- drivers/net/bonding/bond_main.c | 12
-rw-r--r-- drivers/net/bonding/bond_netlink.c | 6
-rw-r--r-- drivers/net/can/at91_can.c | 5
-rw-r--r-- drivers/net/can/c_can/c_can.c | 38
-rw-r--r-- drivers/net/can/dev.c | 9
-rw-r--r-- drivers/net/can/ifi_canfd/ifi_canfd.c | 187
-rw-r--r-- drivers/net/can/janz-ican3.c | 104
-rw-r--r-- drivers/net/can/mscan/mscan.c | 4
-rw-r--r-- drivers/net/can/sja1000/plx_pci.c | 64
-rw-r--r-- drivers/net/can/sja1000/sja1000.c | 6
-rw-r--r-- drivers/net/can/spi/mcp251x.c | 3
-rw-r--r-- drivers/net/can/usb/Kconfig | 5
-rw-r--r-- drivers/net/can/usb/ems_usb.c | 4
-rw-r--r-- drivers/net/can/usb/esd_usb2.c | 4
-rw-r--r-- drivers/net/can/usb/gs_usb.c | 17
-rw-r--r-- drivers/net/can/usb/kvaser_usb.c | 8
-rw-r--r-- drivers/net/can/usb/peak_usb/pcan_usb_core.c | 4
-rw-r--r-- drivers/net/cris/eth_v10.c | 2
-rw-r--r-- drivers/net/dsa/Kconfig | 45
-rw-r--r-- drivers/net/dsa/Makefile | 15
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 62
-rw-r--r-- drivers/net/dsa/mv88e6060.c | 47
-rw-r--r-- drivers/net/dsa/mv88e6060.h | 11
-rw-r--r-- drivers/net/dsa/mv88e6123.c | 124
-rw-r--r-- drivers/net/dsa/mv88e6131.c | 177
-rw-r--r-- drivers/net/dsa/mv88e6171.c | 123
-rw-r--r-- drivers/net/dsa/mv88e6352.c | 345
-rw-r--r-- drivers/net/dsa/mv88e6xxx.c | 2280
-rw-r--r-- drivers/net/dsa/mv88e6xxx.h | 381
-rw-r--r-- drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c515.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c574_cs.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c589_cs.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c59x.c | 2
-rw-r--r-- drivers/net/ethernet/8390/axnet_cs.c | 6
-rw-r--r-- drivers/net/ethernet/8390/lib8390.c | 4
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r-- drivers/net/ethernet/adi/bfin_mac.c | 2
-rw-r--r-- drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r-- drivers/net/ethernet/agere/et131x.c | 6
-rw-r--r-- drivers/net/ethernet/allwinner/sun4i-emac.c | 6
-rw-r--r-- drivers/net/ethernet/amd/7990.c | 12
-rw-r--r-- drivers/net/ethernet/amd/a2065.c | 9
-rw-r--r-- drivers/net/ethernet/amd/atarilance.c | 2
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 8
-rw-r--r-- drivers/net/ethernet/amd/declance.c | 2
-rw-r--r-- drivers/net/ethernet/amd/lance.c | 2
-rw-r--r-- drivers/net/ethernet/amd/ni65.c | 4
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r-- drivers/net/ethernet/amd/pcnet32.c | 4
-rw-r--r-- drivers/net/ethernet/amd/sunlance.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_cle.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_cle.h | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 2
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 15
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 2
-rw-r--r-- drivers/net/ethernet/arc/emac_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/alx/alx.h | 4
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 63
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c.h | 3
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 11
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e.h | 1
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 12
-rw-r--r-- drivers/net/ethernet/aurora/nb8800.c | 1
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 12
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 173
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 396
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.h | 35
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 300
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 467
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 44
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 54
-rw-r--r-- drivers/net/ethernet/broadcom/sb1250-mac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 2
-rw-r--r-- drivers/net/ethernet/cadence/macb.c | 152
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/lio_main.c | 13
-rw-r--r-- drivers/net/ethernet/cavium/liquidio/octeon_device.c | 4
-rw-r--r-- drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nic_main.c | 16
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 95
-rw-r--r-- drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/sge.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 40
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 100
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 147
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 330
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 115
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 29
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 72
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 1
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 15
-rw-r--r-- drivers/net/ethernet/dec/tulip/de4x5.c | 11
-rw-r--r-- drivers/net/ethernet/dec/tulip/dmfe.c | 45
-rw-r--r-- drivers/net/ethernet/dec/tulip/pnic.c | 6
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/uli526x.c | 4
-rw-r--r-- drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 2
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 10
-rw-r--r-- drivers/net/ethernet/ethoc.c | 23
-rw-r--r-- drivers/net/ethernet/ezchip/nps_enet.c | 1
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 36
-rw-r--r-- drivers/net/ethernet/fealnx.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 87
-rw-r--r-- drivers/net/ethernet/freescale/fec_mpc52xx.c | 57
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_muram.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fman/fman_muram.h | 4
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 45
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 4
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 6
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 47
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.h | 1
-rw-r--r-- drivers/net/ethernet/freescale/gianfar_ethtool.c | 52
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 17
-rw-r--r-- drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 93
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 260
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 12
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 209
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 21
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 171
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 67
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 187
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 44
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.c | 23
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_enet.h | 3
-rw-r--r-- drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 11
-rw-r--r-- drivers/net/ethernet/hp/hp100.c | 2
-rw-r--r-- drivers/net/ethernet/i825xx/82596.c | 2
-rw-r--r-- drivers/net/ethernet/i825xx/lib82596.c | 2
-rw-r--r-- drivers/net/ethernet/i825xx/sun3_82586.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 9
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/emac/phy.c | 26
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 478
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.h | 6
-rw-r--r-- drivers/net/ethernet/intel/Kconfig | 80
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000.h | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 8
-rw-r--r-- drivers/net/ethernet/intel/e1000e/80003es2lan.c | 12
-rw-r--r-- drivers/net/ethernet/intel/e1000e/82571.c | 30
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 111
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 61
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 44
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.h | 8
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 173
-rw-r--r-- drivers/net/ethernet/intel/e1000e/nvm.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/phy.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/phy.h | 10
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ptp.c | 2
-rw-r--r-- drivers/net/ethernet/intel/fm10k/Makefile | 7
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k.h | 51
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_common.c | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_common.h | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 352
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 8
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_main.c | 150
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 50
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 226
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 141
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 21
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_ptp.c | 462
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 44
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 4
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_type.h | 28
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 63
-rw-r--r-- drivers/net/ethernet/intel/fm10k/fm10k_vf.h | 14
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 33
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 37
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 82
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_client.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 168
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 36
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_devids.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 386
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 14
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_hmc.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 332
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 86
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 20
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 1083
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 114
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 42
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 45
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 509
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 11
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 70
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 2
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_devids.h | 2
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 1039
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 114
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_type.h | 51
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 45
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 49
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 329
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 503
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 145
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 8
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.h | 30
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_defines.h | 108
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.c | 10
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mbx.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_nvm.c | 2
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_phy.h | 6
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 40
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 21
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 221
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 42
-rw-r--r-- drivers/net/ethernet/intel/igbvf/defines.h | 2
-rw-r--r-- drivers/net/ethernet/intel/igbvf/ethtool.c | 3
-rw-r--r-- drivers/net/ethernet/intel/igbvf/igbvf.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igbvf/netdev.c | 196
-rw-r--r-- drivers/net/ethernet/intel/igbvf/vf.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 99
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 18
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 29
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 161
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 50
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1054
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 46
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 14
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 8
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 117
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 302
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 35
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 693
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/defines.h | 29
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 230
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 37
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 298
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/mbx.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.c | 224
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/vf.h | 6
-rw-r--r-- drivers/net/ethernet/jme.c | 2
-rw-r--r-- drivers/net/ethernet/korina.c | 8
-rw-r--r-- drivers/net/ethernet/lantiq_etop.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 16
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 201
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/alloc.c | 93
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 56
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 209
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_port.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_resources.c | 31
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 37
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/Makefile | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 439
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cq.c | 59
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en.h | 591
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 752
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 341
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 1060
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1366
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 779
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 337
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 113
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 5
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 65
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 59
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 1007
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 43
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 135
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 307
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 226
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/health.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 79
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 63
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/port.c | 138
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 70
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vport.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/vxlan.h | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/wq.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Kconfig | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/Makefile | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.c | 736
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/core.h | 82
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/reg.h | 682
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 581
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 139
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 1001
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 484
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 50
-rw-r--r-- drivers/net/ethernet/micrel/ks8695net.c | 7
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 4
-rw-r--r-- drivers/net/ethernet/microchip/enc28j60.c | 41
-rw-r--r-- drivers/net/ethernet/microchip/encx24j600.c | 4
-rw-r--r-- drivers/net/ethernet/moxa/moxart_ether.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/sonic.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 9
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net.h | 22
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 1101
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 10
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 24
-rw-r--r-- drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 30
-rw-r--r-- drivers/net/ethernet/netx-eth.c | 12
-rw-r--r-- drivers/net/ethernet/nuvoton/w90p910_ether.c | 4
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 2
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 12
-rw-r--r-- drivers/net/ethernet/packetengines/hamachi.c | 2
-rw-r--r-- drivers/net/ethernet/packetengines/yellowfin.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 31
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/Makefile | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed.h | 84
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.c | 186
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 563
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 80
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev.c | 788
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 33
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 148
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.c | 67
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_hw.h | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 167
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.c | 160
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_int.h | 36
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 725
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.h | 239
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_main.c | 328
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.c | 356
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 80
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 53
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_selftest.c | 76
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_selftest.h | 40
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp.h | 36
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 322
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_spq.c | 57
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.c | 3613
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 388
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.c | 1102
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_vf.h | 990
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede.h | 45
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 628
-rw-r--r-- drivers/net/ethernet/qlogic/qede/qede_main.c | 547
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 5
-rw-r--r-- drivers/net/ethernet/qualcomm/qca_spi.c | 4
-rw-r--r-- drivers/net/ethernet/realtek/atp.c | 2
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 44
-rw-r--r-- drivers/net/ethernet/renesas/ravb.h | 206
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 289
-rw-r--r-- drivers/net/ethernet/renesas/ravb_ptp.c | 26
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 39
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r-- drivers/net/ethernet/seeq/sgiseeq.c | 4
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 32
-rw-r--r-- drivers/net/ethernet/sfc/farch.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/mcdi_port.c | 7
-rw-r--r-- drivers/net/ethernet/sfc/net_driver.h | 12
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 102
-rw-r--r-- drivers/net/ethernet/sgi/meth.c | 4
-rw-r--r-- drivers/net/ethernet/sis/sis900.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/epic100.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc911x.c | 6
-rw-r--r-- drivers/net/ethernet/smsc/smc9194.c | 4
-rw-r--r-- drivers/net/ethernet/smsc/smc91c92_cs.c | 4
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 4
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/Makefile | 3
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 64
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 113
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 35
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 255
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 407
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 389
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h | 129
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 354
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 202
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 225
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 21
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc.h | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 349
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 21
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 13
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 659
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 102
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 24
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 13
-rw-r--r-- drivers/net/ethernet/sun/niu.c | 2
-rw-r--r-- drivers/net/ethernet/sun/sungem.c | 2
-rw-r--r-- drivers/net/ethernet/synopsys/dwc_eth_qos.c | 4
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 10
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 60
-rw-r--r-- drivers/net/ethernet/ti/netcp_core.c | 4
-rw-r--r-- drivers/net/ethernet/ti/tlan.c | 2
-rw-r--r-- drivers/net/ethernet/tile/tilegx.c | 6
-rw-r--r-- drivers/net/ethernet/tile/tilepro.c | 4
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | 4
-rw-r--r-- drivers/net/ethernet/toshiba/spider_net.c | 2
-rw-r--r-- drivers/net/ethernet/tundra/tsi108_eth.c | 3
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 2
-rw-r--r-- drivers/net/ethernet/wiznet/Kconfig | 14
-rw-r--r-- drivers/net/ethernet/wiznet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/wiznet/w5100-spi.c | 466
-rw-r--r-- drivers/net/ethernet/wiznet/w5100.c | 1076
-rw-r--r-- drivers/net/ethernet/wiznet/w5100.h | 37
-rw-r--r-- drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 4
-rw-r--r-- drivers/net/ethernet/xircom/xirc2ps_cs.c | 2
-rw-r--r-- drivers/net/fddi/skfp/Makefile | 2
-rw-r--r-- drivers/net/fjes/fjes_hw.c | 30
-rw-r--r-- drivers/net/fjes/fjes_hw.h | 9
-rw-r--r-- drivers/net/fjes/fjes_main.c | 137
-rw-r--r-- drivers/net/geneve.c | 113
-rw-r--r-- drivers/net/gtp.c | 1375
-rw-r--r-- drivers/net/hamradio/baycom_epp.c | 14
-rw-r--r-- drivers/net/hamradio/hdlcdrv.c | 6
-rw-r--r-- drivers/net/hamradio/mkiss.c | 4
-rw-r--r-- drivers/net/hamradio/scc.c | 2
-rw-r--r-- drivers/net/hamradio/yam.c | 2
-rw-r--r-- drivers/net/hyperv/hyperv_net.h | 29
-rw-r--r-- drivers/net/hyperv/netvsc.c | 161
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 428
-rw-r--r-- drivers/net/hyperv/rndis_filter.c | 88
-rw-r--r-- drivers/net/ieee802154/adf7242.c | 3
-rw-r--r-- drivers/net/ieee802154/at86rf230.c | 6
-rw-r--r-- drivers/net/ieee802154/atusb.c | 91
-rw-r--r-- drivers/net/ieee802154/mrf24j40.c | 14
-rw-r--r-- drivers/net/ifb.c | 3
-rw-r--r-- drivers/net/ipvlan/ipvlan_main.c | 19
-rw-r--r-- drivers/net/irda/Kconfig | 7
-rw-r--r-- drivers/net/irda/Makefile | 1
-rw-r--r-- drivers/net/irda/ali-ircc.c | 8
-rw-r--r-- drivers/net/irda/bfin_sir.c | 2
-rw-r--r-- drivers/net/irda/irda-usb.c | 4
-rw-r--r-- drivers/net/irda/nsc-ircc.c | 11
-rw-r--r-- drivers/net/irda/sh_irda.c | 875
-rw-r--r-- drivers/net/irda/smsc-ircc2.c | 2
-rw-r--r-- drivers/net/irda/stir4200.c | 2
-rw-r--r-- drivers/net/irda/via-ircc.c | 8
-rw-r--r-- drivers/net/macsec.c | 169
-rw-r--r-- drivers/net/macvlan.c | 10
-rw-r--r-- drivers/net/macvtap.c | 39
-rw-r--r-- drivers/net/phy/dp83867.c | 13
-rw-r--r-- drivers/net/phy/fixed_phy.c | 30
-rw-r--r-- drivers/net/phy/lxt.c | 22
-rw-r--r-- drivers/net/phy/marvell.c | 82
-rw-r--r-- drivers/net/phy/mdio-mux.c | 10
-rw-r--r-- drivers/net/phy/mdio_bus.c | 10
-rw-r--r-- drivers/net/phy/micrel.c | 34
-rw-r--r-- drivers/net/phy/phy.c | 105
-rw-r--r-- drivers/net/phy/phy_device.c | 5
-rw-r--r-- drivers/net/phy/smsc.c | 17
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 320
-rw-r--r-- drivers/net/rionet.c | 6
-rw-r--r-- drivers/net/slip/slip.c | 2
-rw-r--r-- drivers/net/team/team.c | 2
-rw-r--r-- drivers/net/tun.c | 118
-rw-r--r-- drivers/net/usb/catc.c | 4
-rw-r--r-- drivers/net/usb/cdc_ncm.c | 6
-rw-r--r-- drivers/net/usb/ch9200.c | 3
-rw-r--r-- drivers/net/usb/hso.c | 2
-rw-r--r-- drivers/net/usb/kaweth.c | 2
-rw-r--r-- drivers/net/usb/lan78xx.c | 4
-rw-r--r-- drivers/net/usb/pegasus.c | 4
-rw-r--r-- drivers/net/usb/r8152.c | 158
-rw-r--r-- drivers/net/usb/rtl8150.c | 4
-rw-r--r-- drivers/net/usb/smsc75xx.c | 4
-rw-r--r-- drivers/net/usb/smsc95xx.c | 55
-rw-r--r-- drivers/net/usb/usbnet.c | 15
-rw-r--r-- drivers/net/veth.c | 7
-rw-r--r-- drivers/net/virtio_net.c | 18
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_drv.c | 2
-rw-r--r-- drivers/net/vmxnet3/vmxnet3_int.h | 4
-rw-r--r-- drivers/net/vrf.c | 272
-rw-r--r-- drivers/net/vxlan.c | 355
-rw-r--r-- drivers/net/wan/cosa.c | 2
-rw-r--r-- drivers/net/wan/farsync.c | 6
-rw-r--r-- drivers/net/wan/lmc/lmc_main.c | 2
-rw-r--r-- drivers/net/wan/sbni.c | 8
-rw-r--r-- drivers/net/wimax/i2400m/netdev.c | 2
-rw-r--r-- drivers/net/wireless/admtek/adm8211.c | 4
-rw-r--r-- drivers/net/wireless/ath/ar5523/ar5523.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/ce.c | 50
-rw-r--r-- drivers/net/wireless/ath/ath10k/ce.h | 17
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.c | 482
-rw-r--r-- drivers/net/wireless/ath/ath10k/core.h | 118
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.c | 129
-rw-r--r-- drivers/net/wireless/ath/ath10k/debug.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/htc.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt.h | 60
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_rx.c | 717
-rw-r--r-- drivers/net/wireless/ath/ath10k/htt_tx.c | 300
-rw-r--r-- drivers/net/wireless/ath/ath10k/hw.h | 18
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.c | 729
-rw-r--r-- drivers/net/wireless/ath/ath10k/mac.h | 7
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.c | 270
-rw-r--r-- drivers/net/wireless/ath/ath10k/pci.h | 17
-rw-r--r-- drivers/net/wireless/ath/ath10k/swap.c | 44
-rw-r--r-- drivers/net/wireless/ath/ath10k/swap.h | 9
-rw-r--r-- drivers/net/wireless/ath/ath10k/targaddrs.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/testmode.c | 197
-rw-r--r-- drivers/net/wireless/ath/ath10k/thermal.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath10k/txrx.c | 55
-rw-r--r-- drivers/net/wireless/ath/ath10k/txrx.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi-ops.h | 44
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi-tlv.c | 1
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi-tlv.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.c | 250
-rw-r--r-- drivers/net/wireless/ath/ath10k/wmi.h | 128
-rw-r--r-- drivers/net/wireless/ath/ath10k/wow.c | 7
-rw-r--r-- drivers/net/wireless/ath/ath5k/ani.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath5k/ath5k.h | 10
-rw-r--r-- drivers/net/wireless/ath/ath5k/attach.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath5k/base.c | 32
-rw-r--r-- drivers/net/wireless/ath/ath5k/debug.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath5k/pcu.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath5k/phy.c | 32
-rw-r--r-- drivers/net/wireless/ath/ath5k/qcu.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath5k/reset.c | 10
-rw-r--r-- drivers/net/wireless/ath/ath6kl/cfg80211.c | 22
-rw-r--r-- drivers/net/wireless/ath/ath6kl/core.c | 3
-rw-r--r-- drivers/net/wireless/ath/ath6kl/core.h | 3
-rw-r--r-- drivers/net/wireless/ath/ath6kl/init.c | 9
-rw-r--r-- drivers/net/wireless/ath/ath6kl/wmi.c | 27
-rw-r--r-- drivers/net/wireless/ath/ath6kl/wmi.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/Kconfig | 40
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_calib.c | 44
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_eeprom.c | 12
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_eeprom.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_mci.c | 39
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_phy.c | 82
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9340_initvals.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9485_initvals.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar953x_initvals.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar956x_initvals.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/ath9k.h | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/btcoex.c | 138
-rw-r--r-- drivers/net/wireless/ath/ath9k/btcoex.h | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/calib.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath9k/channel.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath9k/common-init.c | 28
-rw-r--r-- drivers/net/wireless/ath/ath9k/common.c | 4
-rw-r--r-- drivers/net/wireless/ath/ath9k/debug.c | 24
-rw-r--r-- drivers/net/wireless/ath/ath9k/debug_sta.c | 6
-rw-r--r-- drivers/net/wireless/ath/ath9k/dynack.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/gpio.c | 69
-rw-r--r-- drivers/net/wireless/ath/ath9k/hif_usb.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_gpio.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_init.c | 22
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_main.c | 19
-rw-r--r-- drivers/net/wireless/ath/ath9k/htc_drv_txrx.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.c | 277
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.h | 11
-rw-r--r-- drivers/net/wireless/ath/ath9k/init.c | 15
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 13
-rw-r--r-- drivers/net/wireless/ath/ath9k/reg.h | 90
-rw-r--r-- drivers/net/wireless/ath/ath9k/rng.c | 20
-rw-r--r-- drivers/net/wireless/ath/ath9k/xmit.c | 4
-rw-r--r-- drivers/net/wireless/ath/carl9170/mac.c | 12
-rw-r--r-- drivers/net/wireless/ath/carl9170/main.c | 6
-rw-r--r-- drivers/net/wireless/ath/carl9170/phy.c | 18
-rw-r--r-- drivers/net/wireless/ath/carl9170/rx.c | 2
-rw-r--r-- drivers/net/wireless/ath/carl9170/tx.c | 8
-rw-r--r-- drivers/net/wireless/ath/regd.c | 16
-rw-r--r-- drivers/net/wireless/ath/regd.h | 2
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/debug.c | 12
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/hal.h | 55
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/main.c | 145
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/pmc.c | 4
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/smd.c | 228
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/smd.h | 14
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/txrx.c | 12
-rw-r--r-- drivers/net/wireless/ath/wcn36xx/wcn36xx.h | 20
-rw-r--r-- drivers/net/wireless/ath/wil6210/Makefile | 1
-rw-r--r-- drivers/net/wireless/ath/wil6210/cfg80211.c | 337
-rw-r--r-- drivers/net/wireless/ath/wil6210/debug.c | 22
-rw-r--r-- drivers/net/wireless/ath/wil6210/debugfs.c | 196
-rw-r--r-- drivers/net/wireless/ath/wil6210/interrupt.c | 99
-rw-r--r-- drivers/net/wireless/ath/wil6210/ioctl.c | 11
-rw-r--r-- drivers/net/wireless/ath/wil6210/main.c | 170
-rw-r--r-- drivers/net/wireless/ath/wil6210/netdev.c | 9
-rw-r--r-- drivers/net/wireless/ath/wil6210/p2p.c | 259
-rw-r--r-- drivers/net/wireless/ath/wil6210/pcie_bus.c | 1
-rw-r--r-- drivers/net/wireless/ath/wil6210/rx_reorder.c | 204
-rw-r--r-- drivers/net/wireless/ath/wil6210/trace.h | 19
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx.c | 69
-rw-r--r-- drivers/net/wireless/ath/wil6210/txrx.h | 12
-rw-r--r-- drivers/net/wireless/ath/wil6210/wil6210.h | 177
-rw-r--r-- drivers/net/wireless/ath/wil6210/wil_platform.h | 8
-rw-r--r-- drivers/net/wireless/ath/wil6210/wmi.c | 233
-rw-r--r-- drivers/net/wireless/ath/wil6210/wmi.h | 1325
-rw-r--r-- drivers/net/wireless/atmel/at76c50x-usb.c | 6
-rw-r--r-- drivers/net/wireless/atmel/atmel.c | 4
-rw-r--r-- drivers/net/wireless/broadcom/b43/b43.h | 4
-rw-r--r-- drivers/net/wireless/broadcom/b43/main.c | 34
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_ac.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_common.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_ht.c | 16
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_lcn.c | 10
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_lp.c | 30
-rw-r--r-- drivers/net/wireless/broadcom/b43/phy_n.c | 176
-rw-r--r-- drivers/net/wireless/broadcom/b43/tables_lpphy.c | 14
-rw-r--r-- drivers/net/wireless/broadcom/b43/tables_nphy.c | 16
-rw-r--r-- drivers/net/wireless/broadcom/b43/tables_phy_lcn.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/b43/xmit.c | 8
-rw-r--r-- drivers/net/wireless/broadcom/b43legacy/main.c | 12
-rw-r--r-- drivers/net/wireless/broadcom/b43legacy/xmit.c | 2
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c | 10
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c | 3
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h | 4
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 157
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c | 38
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c | 247
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 5
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c | 30
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 209
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h | 1
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | 48
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 10
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h | 16
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 41
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c | 6
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c | 10
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c | 16
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c | 4
-rw-r--r-- drivers/net/wireless/cisco/airo.c | 14
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/ipw2100.c | 10
-rw-r--r-- drivers/net/wireless/intel/ipw2x00/ipw2200.c | 18
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/3945-mac.c | 30
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/3945-rs.c | 22
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/3945.c | 20
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/4965-mac.c | 41
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/4965-rs.c | 22
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/4965.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/4965.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/common.c | 92
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/common.h | 30
-rw-r--r-- drivers/net/wireless/intel/iwlegacy/debug.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/Kconfig | 16
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/agn.h | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/dev.h | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/devices.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/lib.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/main.c | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/rs.c | 22
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/rs.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/rx.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/rxon.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/scan.c | 38
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/sta.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/dvm/tx.c | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-1000.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-2000.c | 14
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-5000.c | 11
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-6000.c | 17
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-7000.c | 21
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 26
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-9000.c | 39
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-config.h | 127
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 15
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 121
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c | 38
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h | 7
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fh.h | 3
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h | 1
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h | 41
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-fw.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-modparams.h | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | 42
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c | 68
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-prph.h | 12
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/Makefile | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/coex.c | 52
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c | 1315
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/constants.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c | 100
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 171
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h | 24
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h | 10
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | 35
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h | 113
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | 198
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 58
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 79
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 173
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 149
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 55
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/power.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 28
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rs.h | 4
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rx.c | 28
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 371
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 58
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sf.c | 8
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 666
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 100
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tdls.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 23
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 257
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/utils.c | 210
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 29
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 123
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 64
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 97
-rw-r--r-- drivers/net/wireless/intersil/hostap/hostap_hw.c | 2
-rw-r--r-- drivers/net/wireless/intersil/orinoco/cfg.c | 6
-rw-r--r-- drivers/net/wireless/intersil/orinoco/hw.c | 2
-rw-r--r-- drivers/net/wireless/intersil/orinoco/main.c | 2
-rw-r--r-- drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 2
-rw-r--r-- drivers/net/wireless/intersil/orinoco/scan.c | 4
-rw-r--r-- drivers/net/wireless/intersil/p54/eeprom.c | 32
-rw-r--r-- drivers/net/wireless/intersil/p54/main.c | 4
-rw-r--r-- drivers/net/wireless/intersil/p54/p54.h | 2
-rw-r--r-- drivers/net/wireless/intersil/p54/txrx.c | 4
-rw-r--r-- drivers/net/wireless/intersil/prism54/isl_38xx.c | 35
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 21
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.h | 1
-rw-r--r-- drivers/net/wireless/marvell/libertas/cfg.c | 10
-rw-r--r-- drivers/net/wireless/marvell/libertas/cmd.c | 4
-rw-r--r-- drivers/net/wireless/marvell/libertas_tf/main.c | 4
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 5
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cfg80211.c | 74
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cfp.c | 12
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/cmdevt.c | 127
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/fw.h | 11
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/init.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.c | 24
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/main.h | 20
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.c | 113
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/pcie.h | 22
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/scan.c | 202
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sdio.c | 88
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sdio.h | 7
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmd.c | 42
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c | 15
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_event.c | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_ioctl.c | 47
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/tdls.c | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/txrx.c | 13
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/uap_cmd.c | 4
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/uap_txrx.c | 96
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/usb.c | 11
-rw-r--r-- drivers/net/wireless/marvell/mwl8k.c | 88
-rw-r--r-- drivers/net/wireless/mediatek/mt7601u/init.c | 4
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 34
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00.h | 7
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00dev.c | 43
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt2x00usb.c | 21
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt61pci.c | 22
-rw-r--r-- drivers/net/wireless/ralink/rt2x00/rt73usb.c | 22
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c | 16
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h | 99
-rw-r--r-- drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c | 93
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/Makefile | 3
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h | 301
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c | 586
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c | 1525
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c | 397
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c | 1682
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c) | 3601
-rw-r--r-- drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h | 46
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/base.c | 44
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c | 847
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c | 611
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c | 856
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c | 652
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c | 851
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h | 17
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/pci.c | 41
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/pci.h | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/ps.c | 12
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/regd.c | 16
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c | 3
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c | 10
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c | 6
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | 4
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c | 8
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/usb.c | 2
-rw-r--r-- drivers/net/wireless/realtek/rtlwifi/wifi.h | 6
-rw-r--r-- drivers/net/wireless/rndis_wlan.c | 4
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mac80211.c | 100
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_mgmt.c | 8
-rw-r--r-- drivers/net/wireless/rsi/rsi_91x_pkt.c | 24
-rw-r--r-- drivers/net/wireless/rsi/rsi_main.h | 2
-rw-r--r-- drivers/net/wireless/st/cw1200/main.c | 10
-rw-r--r-- drivers/net/wireless/st/cw1200/scan.c | 2
-rw-r--r-- drivers/net/wireless/st/cw1200/sta.c | 6
-rw-r--r-- drivers/net/wireless/st/cw1200/txrx.c | 2
-rw-r--r-- drivers/net/wireless/st/cw1200/wsm.c | 4
-rw-r--r-- drivers/net/wireless/ti/wl1251/main.c | 2
-rw-r--r-- drivers/net/wireless/ti/wl1251/ps.c | 2
-rw-r--r-- drivers/net/wireless/ti/wl1251/rx.c | 2
-rw-r--r-- drivers/net/wireless/ti/wl12xx/main.c | 12
-rw-r--r-- drivers/net/wireless/ti/wl12xx/scan.c | 22
-rw-r--r-- drivers/net/wireless/ti/wl18xx/cmd.c | 6
-rw-r--r-- drivers/net/wireless/ti/wl18xx/event.c | 6
-rw-r--r-- drivers/net/wireless/ti/wl18xx/main.c | 22
-rw-r--r-- drivers/net/wireless/ti/wl18xx/scan.c | 8
-rw-r--r-- drivers/net/wireless/ti/wl18xx/tx.c | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/cmd.c | 36
-rw-r--r-- drivers/net/wireless/ti/wlcore/cmd.h | 6
-rw-r--r-- drivers/net/wireless/ti/wlcore/io.c | 17
-rw-r--r-- drivers/net/wireless/ti/wlcore/io.h | 3
-rw-r--r-- drivers/net/wireless/ti/wlcore/main.c | 42
-rw-r--r-- drivers/net/wireless/ti/wlcore/ps.c | 6
-rw-r--r-- drivers/net/wireless/ti/wlcore/rx.c | 4
-rw-r--r-- drivers/net/wireless/ti/wlcore/rx.h | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/scan.c | 16
-rw-r--r-- drivers/net/wireless/ti/wlcore/spi.c | 4
-rw-r--r-- drivers/net/wireless/ti/wlcore/tx.c | 2
-rw-r--r-- drivers/net/wireless/ti/wlcore/tx.h | 4
-rw-r--r-- drivers/net/wireless/ti/wlcore/wlcore.h | 4
-rw-r--r-- drivers/net/wireless/ti/wlcore/wlcore_i.h | 2
-rw-r--r-- drivers/net/wireless/wl3501_cs.c | 4
-rw-r--r-- drivers/net/wireless/zydas/zd1201.c | 2
-rw-r--r-- drivers/net/wireless/zydas/zd1211rw/zd_mac.c | 4
-rw-r--r-- drivers/net/xen-netback/Makefile | 2
-rw-r--r-- drivers/net/xen-netback/common.h | 74
-rw-r--r-- drivers/net/xen-netback/hash.c | 384
-rw-r--r-- drivers/net/xen-netback/interface.c | 133
-rw-r--r-- drivers/net/xen-netback/netback.c | 249
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 79
920 files changed, 61349 insertions, 27500 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index befd67df0..0c5415b05 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -192,6 +192,23 @@ config GENEVE
To compile this driver as a module, choose M here: the module
will be called geneve.
+config GTP
+ tristate "GPRS Tunneling Protocol datapath (GTP-U)"
+ depends on INET && NET_UDP_TUNNEL
+ select NET_IP_TUNNEL
+ ---help---
+ This allows one to create gtp virtual interfaces that provide
+ the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
+ is used to prevent subscribers from accessing mobile carrier core
+ network infrastructure. This driver requires userspace software that
+ implements the signaling protocol (GTP-C) to update its PDP context
+ base, such as OpenGGSN <http://git.osmocom.org/openggsn/>. This
+ tunneling protocol is implemented according to the GSM TS 09.60 and
+ 3GPP TS 29.060 standards.
+
+ To compile this driver as a module, choose M here: the module
+ will be called gtp.
+
config MACSEC
tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
select CRYPTO
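The GTP entry above says the kernel only implements the GTP-U datapath and leans on a userspace GTP-C daemon; that daemon owns the GTP-U UDP socket and hands its file descriptor to the kernel over netlink when the gtp device is created. Below is a minimal, hypothetical C sketch of just the socket-setup step, assuming the standard GTPv1-U port 2152 from 3GPP TS 29.060; the netlink plumbing (e.g. via libgtpnl) is deliberately omitted, and nothing here is taken from the patch itself.

/* Hypothetical sketch: bind the GTP-U socket a GTP-C daemon would later
 * pass to the gtp module. Error handling kept minimal on purpose. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define GTP1U_PORT 2152	/* GTPv1-U, per 3GPP TS 29.060 */

int main(void)
{
	struct sockaddr_in addr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(GTP1U_PORT);
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		close(fd);
		return 1;
	}
	printf("GTP-U socket ready on UDP/%d (fd %d)\n", GTP1U_PORT, fd);
	close(fd);
	return 0;
}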
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 1aa7cb845..7336cbd3e 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
obj-$(CONFIG_VXLAN) += vxlan.o
obj-$(CONFIG_GENEVE) += geneve.o
+obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
obj-$(CONFIG_NET_VRF) += vrf.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 67977f15a..11fe71278 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -35,8 +35,8 @@
#include <net/Space.h>
/* A unified ethernet device probe. This is the easiest way to have every
- ethernet adaptor have the name "eth[0123...]".
- */
+ * ethernet adaptor have the name "eth[0123...]".
+ */
struct devprobe2 {
struct net_device *(*probe)(int unit);
@@ -46,6 +46,7 @@ struct devprobe2 {
static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
{
struct net_device *dev;
+
for (; p->probe; p++) {
if (autoprobe && p->status)
continue;
@@ -58,8 +59,7 @@ static int __init probe_list2(int unit, struct devprobe2 *p, int autoprobe)
return -ENODEV;
}
-/*
- * ISA probes that touch addresses < 0x400 (including those that also
+/* ISA probes that touch addresses < 0x400 (including those that also
* look for EISA/PCI cards in addition to ISA cards).
*/
static struct devprobe2 isa_probes[] __initdata = {
@@ -86,11 +86,11 @@ static struct devprobe2 isa_probes[] __initdata = {
#endif
#ifdef CONFIG_CS89x0
#ifndef CONFIG_CS89x0_PLATFORM
- {cs89x0_probe, 0},
+ {cs89x0_probe, 0},
#endif
#endif
-#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel I82596 */
- {i82596_probe, 0},
+#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_BVME6000_NET) /* Intel */
+ {i82596_probe, 0}, /* I82596 */
#endif
#ifdef CONFIG_NI65
{ni65_probe, 0},
@@ -118,13 +118,12 @@ static struct devprobe2 m68k_probes[] __initdata = {
{mac8390_probe, 0},
#endif
#ifdef CONFIG_MAC89x0
- {mac89x0_probe, 0},
+ {mac89x0_probe, 0},
#endif
{NULL, 0},
};
-/*
- * Unified ethernet device probe, segmented per architecture and
+/* Unified ethernet device probe, segmented per architecture and
* per bus interface. This drives the legacy devices only for now.
*/
@@ -135,7 +134,7 @@ static void __init ethif_probe2(int unit)
if (base_addr == 1)
return;
- (void)( probe_list2(unit, m68k_probes, base_addr == 0) &&
+ (void)(probe_list2(unit, m68k_probes, base_addr == 0) &&
probe_list2(unit, isa_probes, base_addr == 0));
}
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index 1b650f5a5..cbc785a0e 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -860,7 +860,7 @@ static void cops_timeout(struct net_device *dev)
}
printk(KERN_WARNING "%s: Transmit timed out.\n", dev->name);
cops_jumpstart(dev); /* Restart the card. */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index 0d9b45ff1..81f90c470 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -433,7 +433,7 @@ static void __init com90xx_probe(void)
kfree(iomem);
}
-static int check_mirror(unsigned long addr, size_t size)
+static int __init check_mirror(unsigned long addr, size_t size)
{
void __iomem *p;
int res = -1;
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index b9304a295..edc70ffad 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -101,11 +101,14 @@ enum ad_link_speed_type {
#define MAC_ADDRESS_EQUAL(A, B) \
ether_addr_equal_64bits((const u8 *)A, (const u8 *)B)
-static struct mac_addr null_mac_addr = { { 0, 0, 0, 0, 0, 0 } };
+static const u8 null_mac_addr[ETH_ALEN + 2] __long_aligned = {
+ 0, 0, 0, 0, 0, 0
+};
static u16 ad_ticks_per_sec;
static const int ad_delta_in_ticks = (AD_TIMER_INTERVAL * HZ) / 1000;
-static const u8 lacpdu_mcast_addr[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+static const u8 lacpdu_mcast_addr[ETH_ALEN + 2] __long_aligned =
+ MULTICAST_LACPDU_ADDR;
/* ================= main 802.3ad protocol functions ================== */
static int ad_lacpdu_send(struct port *port);
@@ -657,6 +660,20 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
}
}
+static int __agg_active_ports(struct aggregator *agg)
+{
+ struct port *port;
+ int active = 0;
+
+ for (port = agg->lag_ports; port;
+ port = port->next_port_in_aggregator) {
+ if (port->is_enabled)
+ active++;
+ }
+
+ return active;
+}
+
/**
* __get_agg_bandwidth - get the total bandwidth of an aggregator
* @aggregator: the aggregator we're looking at
@@ -664,39 +681,40 @@ static void __set_agg_ports_ready(struct aggregator *aggregator, int val)
*/
static u32 __get_agg_bandwidth(struct aggregator *aggregator)
{
+ int nports = __agg_active_ports(aggregator);
u32 bandwidth = 0;
- if (aggregator->num_of_ports) {
+ if (nports) {
switch (__get_link_speed(aggregator->lag_ports)) {
case AD_LINK_SPEED_1MBPS:
- bandwidth = aggregator->num_of_ports;
+ bandwidth = nports;
break;
case AD_LINK_SPEED_10MBPS:
- bandwidth = aggregator->num_of_ports * 10;
+ bandwidth = nports * 10;
break;
case AD_LINK_SPEED_100MBPS:
- bandwidth = aggregator->num_of_ports * 100;
+ bandwidth = nports * 100;
break;
case AD_LINK_SPEED_1000MBPS:
- bandwidth = aggregator->num_of_ports * 1000;
+ bandwidth = nports * 1000;
break;
case AD_LINK_SPEED_2500MBPS:
- bandwidth = aggregator->num_of_ports * 2500;
+ bandwidth = nports * 2500;
break;
case AD_LINK_SPEED_10000MBPS:
- bandwidth = aggregator->num_of_ports * 10000;
+ bandwidth = nports * 10000;
break;
case AD_LINK_SPEED_20000MBPS:
- bandwidth = aggregator->num_of_ports * 20000;
+ bandwidth = nports * 20000;
break;
case AD_LINK_SPEED_40000MBPS:
- bandwidth = aggregator->num_of_ports * 40000;
+ bandwidth = nports * 40000;
break;
case AD_LINK_SPEED_56000MBPS:
- bandwidth = aggregator->num_of_ports * 56000;
+ bandwidth = nports * 56000;
break;
case AD_LINK_SPEED_100000MBPS:
- bandwidth = aggregator->num_of_ports * 100000;
+ bandwidth = nports * 100000;
break;
default:
bandwidth = 0; /* to silence the compiler */
@@ -1530,10 +1548,10 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
switch (__get_agg_selection_mode(curr->lag_ports)) {
case BOND_AD_COUNT:
- if (curr->num_of_ports > best->num_of_ports)
+ if (__agg_active_ports(curr) > __agg_active_ports(best))
return curr;
- if (curr->num_of_ports < best->num_of_ports)
+ if (__agg_active_ports(curr) < __agg_active_ports(best))
return best;
/*FALLTHROUGH*/
@@ -1561,8 +1579,14 @@ static int agg_device_up(const struct aggregator *agg)
if (!port)
return 0;
- return netif_running(port->slave->dev) &&
- netif_carrier_ok(port->slave->dev);
+ for (port = agg->lag_ports; port;
+ port = port->next_port_in_aggregator) {
+ if (netif_running(port->slave->dev) &&
+ netif_carrier_ok(port->slave->dev))
+ return 1;
+ }
+
+ return 0;
}
/**
@@ -1610,7 +1634,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
agg->is_active = 0;
- if (agg->num_of_ports && agg_device_up(agg))
+ if (__agg_active_ports(agg) && agg_device_up(agg))
best = ad_agg_selection_test(best, agg);
}
@@ -1622,7 +1646,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
* answering partner.
*/
if (active && active->lag_ports &&
- active->lag_ports->is_enabled &&
+ __agg_active_ports(active) &&
(__agg_has_partner(active) ||
(!__agg_has_partner(active) &&
!__agg_has_partner(best)))) {
@@ -1718,7 +1742,7 @@ static void ad_clear_agg(struct aggregator *aggregator)
aggregator->is_individual = false;
aggregator->actor_admin_aggregator_key = 0;
aggregator->actor_oper_aggregator_key = 0;
- aggregator->partner_system = null_mac_addr;
+ eth_zero_addr(aggregator->partner_system.mac_addr_value);
aggregator->partner_system_priority = 0;
aggregator->partner_oper_aggregator_key = 0;
aggregator->receive_state = 0;
@@ -1740,7 +1764,7 @@ static void ad_initialize_agg(struct aggregator *aggregator)
if (aggregator) {
ad_clear_agg(aggregator);
- aggregator->aggregator_mac_address = null_mac_addr;
+ eth_zero_addr(aggregator->aggregator_mac_address.mac_addr_value);
aggregator->aggregator_identifier = 0;
aggregator->slave = NULL;
}
@@ -2133,7 +2157,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
else
temp_aggregator->lag_ports = temp_port->next_port_in_aggregator;
temp_aggregator->num_of_ports--;
- if (temp_aggregator->num_of_ports == 0) {
+ if (__agg_active_ports(temp_aggregator) == 0) {
select_new_active_agg = temp_aggregator->is_active;
ad_clear_agg(temp_aggregator);
if (select_new_active_agg) {
@@ -2432,7 +2456,9 @@ void bond_3ad_adapter_speed_duplex_changed(struct slave *slave)
*/
void bond_3ad_handle_link_change(struct slave *slave, char link)
{
+ struct aggregator *agg;
struct port *port;
+ bool dummy;
port = &(SLAVE_AD_INFO(slave)->port);
@@ -2459,6 +2485,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->is_enabled = false;
ad_update_actor_keys(port, true);
}
+ agg = __get_first_agg(port);
+ ad_agg_selection_logic(agg, &dummy);
+
netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
port->actor_port_number,
link == BOND_LINK_UP ? "UP" : "DOWN");
@@ -2499,7 +2528,7 @@ int bond_3ad_set_carrier(struct bonding *bond)
active = __get_active_agg(&(SLAVE_AD_INFO(first_slave)->aggregator));
if (active) {
/* are enough slaves available to consider link up? */
- if (active->num_of_ports < bond->params.min_links) {
+ if (__agg_active_ports(active) < bond->params.min_links) {
if (netif_carrier_ok(bond->dev)) {
netif_carrier_off(bond->dev);
goto out;
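The common thread in the bond_3ad.c hunks above: aggregator accounting now uses __agg_active_ports(), which counts only enabled ports, instead of the raw num_of_ports, so bandwidth reporting, aggregator selection and the min_links carrier check all track actual link state. A runnable toy model of that count (plain C, not kernel code):

    #include <stdio.h>

    struct port {
            int is_enabled;
            struct port *next_port_in_aggregator;
    };

    /* mirrors __agg_active_ports() above: enabled ports only */
    static int agg_active_ports(struct port *lag_ports)
    {
            int active = 0;
            for (struct port *p = lag_ports; p; p = p->next_port_in_aggregator)
                    if (p->is_enabled)
                            active++;
            return active;
    }

    int main(void)
    {
            struct port p2 = { 0, NULL }, p1 = { 1, &p2 };
            /* 1 of 2 ports enabled -> bandwidth scales by 1, not 2 */
            printf("%d\n", agg_active_ports(&p1));
            return 0;
    }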
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c5ac160a8..551f0f8de 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -42,13 +42,10 @@
-#ifndef __long_aligned
-#define __long_aligned __attribute__((aligned((sizeof(long)))))
-#endif
-static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_bcast[ETH_ALEN + 2] __long_aligned = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
-static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
+static const u8 mac_v6_allmcast[ETH_ALEN + 2] __long_aligned = {
0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 941ec99cd..4d7981946 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1422,7 +1422,16 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return -EINVAL;
}
- if (slave_ops->ndo_set_mac_address == NULL) {
+ if (slave_dev->type == ARPHRD_INFINIBAND &&
+ BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+ netdev_warn(bond_dev, "Type (%d) supports only active-backup mode\n",
+ slave_dev->type);
+ res = -EOPNOTSUPP;
+ goto err_undo_flags;
+ }
+
+ if (!slave_ops->ndo_set_mac_address ||
+ slave_dev->type == ARPHRD_INFINIBAND) {
netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
@@ -1584,6 +1593,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
/* check for initial state */
+ new_slave->link = BOND_LINK_NOCHANGE;
if (bond->params.miimon) {
if (bond_check_dev_link(bond, slave_dev, 0) == BMSR_LSTATUS) {
if (bond->params.updelay) {
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index db760e841..b8df0f5e8 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -446,7 +446,11 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
if (err < 0)
return err;
- return register_netdevice(bond_dev);
+ err = register_netdevice(bond_dev);
+
+ netif_carrier_off(bond_dev);
+
+ return err;
}
static size_t bond_get_size(const struct net_device *bond_dev)
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 8b3275d77..8f5e93cb7 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -712,9 +712,10 @@ static int at91_poll_rx(struct net_device *dev, int quota)
/* upper group completed, look again in lower */
if (priv->rx_next > get_mb_rx_low_last(priv) &&
- quota > 0 && mb > get_mb_rx_last(priv)) {
+ mb > get_mb_rx_last(priv)) {
priv->rx_next = get_mb_rx_first(priv);
- goto again;
+ if (quota > 0)
+ goto again;
}
return received;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f91b09428..e3dccd320 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -332,9 +332,23 @@ static void c_can_setup_tx_object(struct net_device *dev, int iface,
priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
- for (i = 0; i < frame->can_dlc; i += 2) {
- priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
- frame->data[i] | (frame->data[i + 1] << 8));
+ if (priv->type == BOSCH_D_CAN) {
+ u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface);
+
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = (u32)frame->data[i];
+ data |= (u32)frame->data[i + 1] << 8;
+ data |= (u32)frame->data[i + 2] << 16;
+ data |= (u32)frame->data[i + 3] << 24;
+ priv->write_reg32(priv, dreg, data);
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2) {
+ priv->write_reg(priv,
+ C_CAN_IFACE(DATA1_REG, iface) + i / 2,
+ frame->data[i] |
+ (frame->data[i + 1] << 8));
+ }
}
}
@@ -402,10 +416,20 @@ static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
} else {
int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
- for (i = 0; i < frame->can_dlc; i += 2, dreg ++) {
- data = priv->read_reg(priv, dreg);
- frame->data[i] = data;
- frame->data[i + 1] = data >> 8;
+ if (priv->type == BOSCH_D_CAN) {
+ for (i = 0; i < frame->can_dlc; i += 4, dreg += 2) {
+ data = priv->read_reg32(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ frame->data[i + 2] = data >> 16;
+ frame->data[i + 3] = data >> 24;
+ }
+ } else {
+ for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
+ data = priv->read_reg(priv, dreg);
+ frame->data[i] = data;
+ frame->data[i + 1] = data >> 8;
+ }
}
}
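For BOSCH_D_CAN cores the message-object data registers are now accessed 32 bits at a time: the hunks above pack and unpack four payload bytes per access in little-endian order, stepping the register index by two 16-bit slots (hence dreg += 2). A standalone sketch of the packing:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t d[4] = { 0x11, 0x22, 0x33, 0x44 };
            /* little-endian assembly, as in c_can_setup_tx_object() above */
            uint32_t word = (uint32_t)d[0] |
                            (uint32_t)d[1] << 8 |
                            (uint32_t)d[2] << 16 |
                            (uint32_t)d[3] << 24;
            printf("0x%08x\n", (unsigned)word); /* prints 0x44332211 */
            return 0;
    }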
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 910c12e26..ad535a854 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -798,6 +798,9 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[])
* - control mode with CAN_CTRLMODE_FD set
*/
+ if (!data)
+ return 0;
+
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
@@ -1008,6 +1011,11 @@ static int can_newlink(struct net *src_net, struct net_device *dev,
return -EOPNOTSUPP;
}
+static void can_dellink(struct net_device *dev, struct list_head *head)
+{
+ return;
+}
+
static struct rtnl_link_ops can_link_ops __read_mostly = {
.kind = "can",
.maxtype = IFLA_CAN_MAX,
@@ -1016,6 +1024,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
.validate = can_validate,
.newlink = can_newlink,
.changelink = can_changelink,
+ .dellink = can_dellink,
.get_size = can_get_size,
.fill_info = can_fill_info,
.get_xstats_size = can_get_xstats_size,
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index a1bd54ffd..2d1d22eec 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -34,6 +34,7 @@
#define IFI_CANFD_STCMD_LOOPBACK BIT(18)
#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24)
#define IFI_CANFD_STCMD_ENABLE_ISO BIT(25)
+#define IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING BIT(26)
#define IFI_CANFD_STCMD_NORMAL_MODE ((u32)BIT(31))
#define IFI_CANFD_RXSTCMD 0x4
@@ -51,7 +52,8 @@
#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13)
#define IFI_CANFD_INTERRUPT 0xc
-#define IFI_CANFD_INTERRUPT_ERROR_WARNING ((u32)BIT(1))
+#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1)
+#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10)
#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16)
#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22)
#define IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY BIT(24)
@@ -71,12 +73,12 @@
#define IFI_CANFD_TIME_TIMEB_OFF 0
#define IFI_CANFD_TIME_TIMEA_OFF 8
#define IFI_CANFD_TIME_PRESCALE_OFF 16
-#define IFI_CANFD_TIME_SJW_OFF_ISO 25
-#define IFI_CANFD_TIME_SJW_OFF_BOSCH 28
-#define IFI_CANFD_TIME_SET_SJW_BOSCH BIT(6)
-#define IFI_CANFD_TIME_SET_TIMEB_BOSCH BIT(7)
-#define IFI_CANFD_TIME_SET_PRESC_BOSCH BIT(14)
-#define IFI_CANFD_TIME_SET_TIMEA_BOSCH BIT(15)
+#define IFI_CANFD_TIME_SJW_OFF_7_9_8_8 25
+#define IFI_CANFD_TIME_SJW_OFF_4_12_6_6 28
+#define IFI_CANFD_TIME_SET_SJW_4_12_6_6 BIT(6)
+#define IFI_CANFD_TIME_SET_TIMEB_4_12_6_6 BIT(7)
+#define IFI_CANFD_TIME_SET_PRESC_4_12_6_6 BIT(14)
+#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15)
#define IFI_CANFD_TDELAY 0x1c
@@ -102,7 +104,26 @@
#define IFI_CANFD_RES1 0x40
-#define IFI_CANFD_RES2 0x44
+#define IFI_CANFD_ERROR_CTR 0x44
+#define IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC 0x21302899
+#define IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST BIT(0)
+#define IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST BIT(1)
+#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST BIT(2)
+#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST BIT(3)
+#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST BIT(4)
+#define IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST BIT(5)
+#define IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST BIT(6)
+#define IFI_CANFD_ERROR_CTR_OVERLOAD_ALL BIT(8)
+#define IFI_CANFD_ERROR_CTR_ACK_ERROR_ALL BIT(9)
+#define IFI_CANFD_ERROR_CTR_BIT0_ERROR_ALL BIT(10)
+#define IFI_CANFD_ERROR_CTR_BIT1_ERROR_ALL BIT(11)
+#define IFI_CANFD_ERROR_CTR_STUFF_ERROR_ALL BIT(12)
+#define IFI_CANFD_ERROR_CTR_CRC_ERROR_ALL BIT(13)
+#define IFI_CANFD_ERROR_CTR_FORM_ERROR_ALL BIT(14)
+#define IFI_CANFD_ERROR_CTR_BITPOSITION_OFFSET 16
+#define IFI_CANFD_ERROR_CTR_BITPOSITION_MASK 0xff
+#define IFI_CANFD_ERROR_CTR_ER_RESET BIT(30)
+#define IFI_CANFD_ERROR_CTR_ER_ENABLE ((u32)BIT(31))
#define IFI_CANFD_PAR 0x48
@@ -196,6 +217,8 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
if (enable) {
enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
IFI_CANFD_IRQMASK_RXFIFO_NEMPTY;
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
}
writel(IFI_CANFD_IRQMASK_SET_ERR |
@@ -334,6 +357,68 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
return 1;
}
+static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr)
+{
+ struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
+ IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST |
+ IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST;
+
+ if (!(errctr & errmask)) /* No error happened. */
+ return 0;
+
+ priv->can.can_stats.bus_error++;
+ stats->rx_errors++;
+
+ /* Propagate the error condition to the CAN stack. */
+ skb = alloc_can_err_skb(ndev, &cf);
+ if (unlikely(!skb))
+ return 0;
+
+ /* Fill the error frame from the first-error flags. */
+ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST)
+ cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST)
+ cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST)
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_BIT1_ERROR_FIRST)
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_STUFF_ERROR_FIRST)
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_CRC_ERROR_FIRST)
+ cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+
+ if (errctr & IFI_CANFD_ERROR_CTR_FORM_ERROR_FIRST)
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+
+ /* Reset the error counter, ack the IRQ and re-enable the counter. */
+ writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+ writel(IFI_CANFD_INTERRUPT_ERROR_COUNTER,
+ priv->base + IFI_CANFD_INTERRUPT);
+ writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_receive_skb(skb);
+
+ return 1;
+}
+
static int ifi_canfd_get_berr_counter(const struct net_device *ndev,
struct can_berr_counter *bec)
{
@@ -469,6 +554,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
u32 rxstcmd = readl(priv->base + IFI_CANFD_STCMD);
+ u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
/* Handle bus state changes */
if ((stcmd & stcmd_state_mask) ||
@@ -479,6 +565,10 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
work_done += ifi_canfd_handle_lost_msg(ndev);
+ /* Handle lec errors on the bus */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ work_done += ifi_canfd_handle_lec_err(ndev, errctr);
+
/* Handle normal messages on RX */
if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
@@ -497,11 +587,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
struct ifi_canfd_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
- IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER;
+ IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
+ IFI_CANFD_INTERRUPT_ERROR_WARNING |
+ IFI_CANFD_INTERRUPT_ERROR_COUNTER;
const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
- const u32 clr_irq_mask = ~(IFI_CANFD_INTERRUPT_SET_IRQ |
- IFI_CANFD_INTERRUPT_ERROR_WARNING);
+ const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ |
+ IFI_CANFD_INTERRUPT_ERROR_WARNING));
u32 isr;
isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -513,44 +605,34 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
/* Clear all pending interrupts but ErrWarn */
writel(clr_irq_mask, priv->base + IFI_CANFD_INTERRUPT);
- /* RX IRQ, start NAPI */
+ /* RX IRQ or bus warning, start NAPI */
if (isr & rx_irq_mask) {
ifi_canfd_irq_enable(ndev, 0);
napi_schedule(&priv->napi);
}
/* TX IRQ */
- if (isr & tx_irq_mask) {
+ if (isr & IFI_CANFD_INTERRUPT_TXFIFO_REMOVE) {
stats->tx_bytes += can_get_echo_skb(ndev, 0);
stats->tx_packets++;
can_led_event(ndev, CAN_LED_EVENT_TX);
- netif_wake_queue(ndev);
}
+ if (isr & tx_irq_mask)
+ netif_wake_queue(ndev);
+
return IRQ_HANDLED;
}
static const struct can_bittiming_const ifi_canfd_bittiming_const = {
.name = KBUILD_MODNAME,
.tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 64,
- .tseg2_min = 2, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 64,
- .sjw_max = 16,
- .brp_min = 2,
- .brp_max = 256,
- .brp_inc = 1,
-};
-
-static const struct can_bittiming_const ifi_canfd_data_bittiming_const = {
- .name = KBUILD_MODNAME,
- .tseg1_min = 1, /* Time segment 1 = prop_seg + phase_seg1 */
- .tseg1_max = 64,
+ .tseg1_max = 256,
.tseg2_min = 2, /* Time segment 2 = phase_seg2 */
- .tseg2_max = 64,
- .sjw_max = 16,
+ .tseg2_max = 256,
+ .sjw_max = 128,
.brp_min = 2,
- .brp_max = 256,
+ .brp_max = 512,
.brp_inc = 1,
};
@@ -560,19 +642,6 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
- u32 noniso_arg = 0;
- u32 time_off;
-
- if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
- !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) {
- time_off = IFI_CANFD_TIME_SJW_OFF_ISO;
- } else {
- noniso_arg = IFI_CANFD_TIME_SET_TIMEB_BOSCH |
- IFI_CANFD_TIME_SET_TIMEA_BOSCH |
- IFI_CANFD_TIME_SET_PRESC_BOSCH |
- IFI_CANFD_TIME_SET_SJW_BOSCH;
- time_off = IFI_CANFD_TIME_SJW_OFF_BOSCH;
- }
/* Configure bit timing */
brp = bt->brp - 2;
@@ -582,8 +651,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
(tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
(brp << IFI_CANFD_TIME_PRESCALE_OFF) |
- (sjw << time_off) |
- noniso_arg,
+ (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
priv->base + IFI_CANFD_TIME);
/* Configure data bit timing */
@@ -594,8 +662,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
writel((tseg2 << IFI_CANFD_TIME_TIMEB_OFF) |
(tseg1 << IFI_CANFD_TIME_TIMEA_OFF) |
(brp << IFI_CANFD_TIME_PRESCALE_OFF) |
- (sjw << time_off) |
- noniso_arg,
+ (sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
priv->base + IFI_CANFD_FTIME);
}
@@ -640,7 +707,8 @@ static void ifi_canfd_start(struct net_device *ndev)
/* Reset the IP */
writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
- writel(0, priv->base + IFI_CANFD_STCMD);
+ writel(IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING,
+ priv->base + IFI_CANFD_STCMD);
ifi_canfd_set_bittiming(ndev);
ifi_canfd_set_filters(ndev);
@@ -659,7 +727,8 @@ static void ifi_canfd_start(struct net_device *ndev)
writel((u32)(~IFI_CANFD_INTERRUPT_SET_IRQ),
priv->base + IFI_CANFD_INTERRUPT);
- stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE;
+ stcmd = IFI_CANFD_STCMD_ENABLE | IFI_CANFD_STCMD_NORMAL_MODE |
+ IFI_CANFD_STCMD_ENABLE_7_9_8_8_TIMING;
if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
stcmd |= IFI_CANFD_STCMD_BUSMONITOR;
@@ -667,16 +736,23 @@ static void ifi_canfd_start(struct net_device *ndev)
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
stcmd |= IFI_CANFD_STCMD_LOOPBACK;
- if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
+ if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) &&
+ !(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO))
stcmd |= IFI_CANFD_STCMD_ENABLE_ISO;
- if (!(priv->can.ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)))
+ if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD))
stcmd |= IFI_CANFD_STCMD_DISABLE_CANFD;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
ifi_canfd_irq_enable(ndev, 1);
+ /* Unlock, reset and enable the error counter. */
+ writel(IFI_CANFD_ERROR_CTR_UNLOCK_MAGIC,
+ priv->base + IFI_CANFD_ERROR_CTR);
+ writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+ writel(IFI_CANFD_ERROR_CTR_ER_ENABLE, priv->base + IFI_CANFD_ERROR_CTR);
+
/* Enable controller */
writel(stcmd, priv->base + IFI_CANFD_STCMD);
}
@@ -685,6 +761,10 @@ static void ifi_canfd_stop(struct net_device *ndev)
{
struct ifi_canfd_priv *priv = netdev_priv(ndev);
+ /* Reset and disable the error counter. */
+ writel(IFI_CANFD_ERROR_CTR_ER_RESET, priv->base + IFI_CANFD_ERROR_CTR);
+ writel(0, priv->base + IFI_CANFD_ERROR_CTR);
+
/* Reset the IP */
writel(IFI_CANFD_STCMD_HARDRESET, priv->base + IFI_CANFD_STCMD);
@@ -877,7 +957,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = readl(addr + IFI_CANFD_CANCLOCK);
priv->can.bittiming_const = &ifi_canfd_bittiming_const;
- priv->can.data_bittiming_const = &ifi_canfd_data_bittiming_const;
+ priv->can.data_bittiming_const = &ifi_canfd_bittiming_const;
priv->can.do_set_mode = ifi_canfd_set_mode;
priv->can.do_get_berr_counter = ifi_canfd_get_berr_counter;
@@ -888,7 +968,8 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_FD |
- CAN_CTRLMODE_FD_NON_ISO;
+ CAN_CTRLMODE_FD_NON_ISO |
+ CAN_CTRLMODE_BERR_REPORTING;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, dev);
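The new ERROR_CTR register reports which error type occurred first and, per the defines added above, a bit-position field this handler does not yet use. A runnable toy decode of that layout, using values taken from the #defines in this patch:

    #include <stdint.h>
    #include <stdio.h>

    #define ERR_ACK_FIRST   (1u << 1)  /* IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST */
    #define ERR_BITPOS_OFF  16         /* ..._BITPOSITION_OFFSET */
    #define ERR_BITPOS_MASK 0xffu      /* ..._BITPOSITION_MASK */

    int main(void)
    {
            uint32_t errctr = ERR_ACK_FIRST | (23u << ERR_BITPOS_OFF);

            if (errctr & ERR_ACK_FIRST)
                    printf("first error: ACK, at bit position %u\n",
                           (unsigned)((errctr >> ERR_BITPOS_OFF) & ERR_BITPOS_MASK));
            return 0;
    }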
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 5d04f5464..f13bb8d9b 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -84,6 +84,7 @@
#define MSG_COFFREQ 0x42
#define MSG_CONREQ 0x43
#define MSG_CCONFREQ 0x47
+#define MSG_NMTS 0xb0
#define MSG_LMTS 0xb4
/*
@@ -130,6 +131,22 @@
#define ICAN3_CAN_DLC_MASK 0x0f
+/* Janz ICAN3 NMTS subtypes */
+#define NMTS_CREATE_NODE_REQ 0x0
+#define NMTS_SLAVE_STATE_IND 0x8
+#define NMTS_SLAVE_EVENT_IND 0x9
+
+/* Janz ICAN3 LMTS subtypes */
+#define LMTS_BUSON_REQ 0x0
+#define LMTS_BUSOFF_REQ 0x1
+#define LMTS_CAN_CONF_REQ 0x2
+
+/* Janz ICAN3 NMTS Event indications */
+#define NE_LOCAL_OCCURRED 0x3
+#define NE_LOCAL_RESOLVED 0x2
+#define NE_REMOTE_OCCURRED 0xc
+#define NE_REMOTE_RESOLVED 0x8
+
/*
* SJA1000 Status and Error Register Definitions
*
@@ -800,21 +817,41 @@ static int ican3_set_bus_state(struct ican3_dev *mod, bool on)
return ican3_send_msg(mod, &msg);
} else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+ /* bittiming + can-on/off request */
memset(&msg, 0, sizeof(msg));
msg.spec = MSG_LMTS;
if (on) {
msg.len = cpu_to_le16(4);
- msg.data[0] = 0;
+ msg.data[0] = LMTS_BUSON_REQ;
msg.data[1] = 0;
msg.data[2] = btr0;
msg.data[3] = btr1;
} else {
msg.len = cpu_to_le16(2);
- msg.data[0] = 1;
+ msg.data[0] = LMTS_BUSOFF_REQ;
msg.data[1] = 0;
}
+ res = ican3_send_msg(mod, &msg);
+ if (res)
+ return res;
- return ican3_send_msg(mod, &msg);
+ if (on) {
+ /* create NMT Slave Node for error processing
+ * class 2 (with error capability, see CiA/DS203-1)
+ * id 1
+ * name locnod1 (must be exactly 7 bytes)
+ */
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_NMTS;
+ msg.len = cpu_to_le16(11);
+ msg.data[0] = NMTS_CREATE_NODE_REQ;
+ msg.data[1] = 0;
+ msg.data[2] = 2; /* node class */
+ msg.data[3] = 1; /* node id */
+ strcpy(msg.data + 4, "locnod1"); /* node name */
+ return ican3_send_msg(mod, &msg);
+ }
+ return 0;
}
return -ENOTSUPP;
}
@@ -849,12 +886,23 @@ static int ican3_set_buserror(struct ican3_dev *mod, u8 quota)
{
struct ican3_msg msg;
- memset(&msg, 0, sizeof(msg));
- msg.spec = MSG_CCONFREQ;
- msg.len = cpu_to_le16(2);
- msg.data[0] = 0x00;
- msg.data[1] = quota;
-
+ if (mod->fwtype == ICAN3_FWTYPE_ICANOS) {
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_CCONFREQ;
+ msg.len = cpu_to_le16(2);
+ msg.data[0] = 0x00;
+ msg.data[1] = quota;
+ } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) {
+ memset(&msg, 0, sizeof(msg));
+ msg.spec = MSG_LMTS;
+ msg.len = cpu_to_le16(4);
+ msg.data[0] = LMTS_CAN_CONF_REQ;
+ msg.data[1] = 0x00;
+ msg.data[2] = 0x00;
+ msg.data[3] = quota;
+ } else {
+ return -ENOTSUPP;
+ }
return ican3_send_msg(mod, &msg);
}
@@ -1150,6 +1198,41 @@ static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
}
}
+/* Handle NMTS Slave Event Indication Messages from the firmware */
+static void ican3_handle_nmtsind(struct ican3_dev *mod, struct ican3_msg *msg)
+{
+ u16 subspec;
+
+ subspec = msg->data[0] + msg->data[1] * 0x100;
+ if (subspec == NMTS_SLAVE_EVENT_IND) {
+ switch (msg->data[2]) {
+ case NE_LOCAL_OCCURRED:
+ case NE_LOCAL_RESOLVED:
+ /* what follows matches the raw ICANOS CEVTIND format, so
+ * shift the data into place and reuse that handler
+ */
+ le16_add_cpu(&msg->len, -3);
+ memmove(msg->data, msg->data + 3, le16_to_cpu(msg->len));
+ ican3_handle_cevtind(mod, msg);
+ break;
+ case NE_REMOTE_OCCURRED:
+ case NE_REMOTE_RESOLVED:
+ /* should not occur, ignore */
+ break;
+ default:
+ netdev_warn(mod->ndev, "unknown NMTS event indication %x\n",
+ msg->data[2]);
+ break;
+ }
+ } else if (subspec == NMTS_SLAVE_STATE_IND) {
+ /* ignore state indications */
+ } else {
+ netdev_warn(mod->ndev, "unhandled NMTS indication %x\n",
+ subspec);
+ return;
+ }
+}
+
static void ican3_handle_unknown_message(struct ican3_dev *mod,
struct ican3_msg *msg)
{
@@ -1179,6 +1262,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
case MSG_INQUIRY:
ican3_handle_inquiry(mod, msg);
break;
+ case MSG_NMTS:
+ ican3_handle_nmtsind(mod, msg);
+ break;
default:
ican3_handle_unknown_message(mod, msg);
break;
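ican3_handle_nmtsind() assembles the 16-bit subtype from two little-endian bytes and, for local events, strips the 3-byte NMTS header so the payload can be fed to the existing CEVTIND handler. A toy version of the subtype decode and header strip:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            /* NMTS_SLAVE_EVENT_IND (0x0009), NE_LOCAL_OCCURRED, payload */
            uint8_t data[8] = { 0x09, 0x00, 0x03, 0xaa, 0xbb };
            uint16_t len = 5;

            uint16_t subspec = data[0] + data[1] * 0x100; /* little-endian u16 */
            printf("subspec=0x%04x\n", (unsigned)subspec);

            /* drop the 3-byte NMTS header, as le16_add_cpu()+memmove() do above */
            len -= 3;
            memmove(data, data + 3, len);
            printf("payload starts with 0x%02x, len=%u\n", data[0], (unsigned)len);
            return 0;
    }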
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index e36b7400d..acb708fc1 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -276,7 +276,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
out_8(&regs->cantflg, 1 << buf_id);
if (!test_bit(F_TX_PROGRESS, &priv->flags))
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);
@@ -469,7 +469,7 @@ static irqreturn_t mscan_isr(int irq, void *dev_id)
clear_bit(F_TX_PROGRESS, &priv->flags);
priv->cur_pri = 0;
} else {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 8836a7485..3eb7430df 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -39,6 +39,7 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
"Adlink PCI-7841/cPCI-7841 SE, "
"Marathon CAN-bus-PCI, "
+ "Marathon CAN-bus-PCIe, "
"TEWS TECHNOLOGIES TPMC810, "
"esd CAN-PCI/CPCI/PCI104/200, "
"esd CAN-PCI/PMC/266, "
@@ -133,6 +134,7 @@ struct plx_pci_card {
#define IXXAT_PCI_SUB_SYS_ID 0x2540
#define MARATHON_PCI_DEVICE_ID 0x2715
+#define MARATHON_PCIE_DEVICE_ID 0x3432
#define TEWS_PCI_VENDOR_ID 0x1498
#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
@@ -141,8 +143,9 @@ struct plx_pci_card {
#define CTI_PCI_DEVICE_ID_CRG001 0x0900
static void plx_pci_reset_common(struct pci_dev *pdev);
-static void plx_pci_reset_marathon(struct pci_dev *pdev);
static void plx9056_pci_reset_common(struct pci_dev *pdev);
+static void plx_pci_reset_marathon_pci(struct pci_dev *pdev);
+static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev);
struct plx_pci_channel_map {
u32 bar;
@@ -215,14 +218,22 @@ static struct plx_pci_card_info plx_pci_card_info_ixxat = {
/* based on PLX9050 */
};
-static struct plx_pci_card_info plx_pci_card_info_marathon = {
+static struct plx_pci_card_info plx_pci_card_info_marathon_pci = {
"Marathon CAN-bus-PCI", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
{0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
- &plx_pci_reset_marathon
+ &plx_pci_reset_marathon_pci
/* based on PLX9052 */
};
+static struct plx_pci_card_info plx_pci_card_info_marathon_pcie = {
+ "Marathon CAN-bus-PCIe", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {3, 0x80, 0x00} },
+ &plx_pci_reset_marathon_pcie
+ /* based on PEX8311 */
+};
+
static struct plx_pci_card_info plx_pci_card_info_tews = {
"TEWS TECHNOLOGIES TPMC810", 2,
PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -316,7 +327,14 @@ static const struct pci_device_id plx_pci_tbl[] = {
PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
PCI_ANY_ID, PCI_ANY_ID,
0, 0,
- (kernel_ulong_t)&plx_pci_card_info_marathon
+ (kernel_ulong_t)&plx_pci_card_info_marathon_pci
+ },
+ {
+ /* Marathon CAN-bus-PCIe card */
+ PCI_VENDOR_ID_PLX, MARATHON_PCIE_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_marathon_pcie
},
{
/* TEWS TECHNOLOGIES TPMC810 card */
@@ -437,8 +455,8 @@ static void plx9056_pci_reset_common(struct pci_dev *pdev)
iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
};
-/* Special reset function for Marathon card */
-static void plx_pci_reset_marathon(struct pci_dev *pdev)
+/* Special reset function for Marathon CAN-bus-PCI card */
+static void plx_pci_reset_marathon_pci(struct pci_dev *pdev)
{
void __iomem *reset_addr;
int i;
@@ -460,6 +478,34 @@ static void plx_pci_reset_marathon(struct pci_dev *pdev)
}
}
+/* Special reset function for Marathon CAN-bus-PCIe card */
+static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev)
+{
+ void __iomem *addr;
+ void __iomem *reset_addr;
+ int i;
+
+ plx9056_pci_reset_common(pdev);
+
+ for (i = 0; i < 2; i++) {
+ struct plx_pci_channel_map *chan_map =
+ &plx_pci_card_info_marathon_pcie.chan_map_tbl[i];
+ addr = pci_iomap(pdev, chan_map->bar, chan_map->size);
+ if (!addr) {
+ dev_err(&pdev->dev, "Failed to remap reset space %d (BAR%d)\n", i, chan_map->bar);
+ } else {
+ /* reset the SJA1000 chip */
+ #define MARATHON_PCIE_RESET_OFFSET 32
+ reset_addr = addr + chan_map->offset +
+ MARATHON_PCIE_RESET_OFFSET;
+ iowrite8(0x1, reset_addr);
+ udelay(100);
+ pci_iounmap(pdev, addr);
+ }
+ }
+}
+
static void plx_pci_del_card(struct pci_dev *pdev)
{
struct plx_pci_card *card = pci_get_drvdata(pdev);
@@ -486,7 +532,8 @@ static void plx_pci_del_card(struct pci_dev *pdev)
* Disable interrupts from PCI-card and disable local
* interrupts
*/
- if (pdev->device != PCI_DEVICE_ID_PLX_9056)
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056 &&
+ pdev->device != MARATHON_PCIE_DEVICE_ID)
iowrite32(0x0, card->conf_addr + PLX_INTCSR);
else
iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
@@ -619,7 +666,8 @@ static int plx_pci_add_card(struct pci_dev *pdev,
* Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
* Local_2 interrupts from the SJA1000 chips
*/
- if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
+ if (pdev->device != PCI_DEVICE_ID_PLX_9056 &&
+ pdev->device != MARATHON_PCIE_DEVICE_ID) {
val = ioread32(card->conf_addr + PLX_INTCSR);
if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 8dda3b703..9f107798f 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -438,6 +438,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+ /* set error type */
switch (ecc & ECC_MASK) {
case ECC_BIT:
cf->data[2] |= CAN_ERR_PROT_BIT;
@@ -449,9 +450,12 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
cf->data[2] |= CAN_ERR_PROT_STUFF;
break;
default:
- cf->data[3] = ecc & ECC_SEG;
break;
}
+
+ /* set error location */
+ cf->data[3] = ecc & ECC_SEG;
+
/* Error occurred during transmission? */
if ((ecc & ECC_DIR) == 0)
cf->data[2] |= CAN_ERR_PROT_TX;
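With this change the SJA1000 ECC segment field is reported for every bus-error type, not only in the default branch. A runnable toy decode of the register (mask values as I recall them from sja1000.h; verify against your tree):

    #include <stdint.h>
    #include <stdio.h>

    #define ECC_SEG  0x1f  /* error location */
    #define ECC_DIR  0x20  /* 0 = transmit, 1 = receive */
    #define ECC_MASK 0xc0  /* error type */

    int main(void)
    {
            uint8_t ecc = 0xb9; /* stuff error, rx, segment 0x19 */
            printf("type=0x%02x dir=%s loc=0x%02x\n",
                   ecc & ECC_MASK,
                   (ecc & ECC_DIR) ? "rx" : "tx",
                   ecc & ECC_SEG);
            return 0;
    }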
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 74a7dfece..cf36d26ef 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -961,7 +961,8 @@ static int mcp251x_open(struct net_device *net)
goto open_unlock;
}
- priv->wq = create_freezable_workqueue("mcp251x_wq");
+ priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ 0);
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
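create_freezable_workqueue() expands to alloc_workqueue() without WQ_MEM_RECLAIM; open-coding the call lets the driver add that flag so the queue gets a rescuer thread and can make forward progress under memory pressure, which matters since it sits in the transmit path. A hedged sketch of the pattern (kernel API as of this release; note the hunk above does not add the failure check shown here):

    struct workqueue_struct *wq;

    wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
    if (!wq)
            return -ENOMEM; /* not present in the hunk above */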
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bcb272f6c..8483a40e7 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -16,7 +16,8 @@ config CAN_ESD_USB2
config CAN_GS_USB
tristate "Geschwister Schneider UG interfaces"
---help---
- This driver supports the Geschwister Schneider USB/CAN devices.
+ This driver supports the Geschwister Schneider and bytewerk.org
+ candleLight USB/CAN interfaces.
If unsure choose N,
choose Y for built in support,
M to compile as module (module will be named: gs_usb).
@@ -46,6 +47,8 @@ config CAN_KVASER_USB
- Kvaser USBcan R
- Kvaser Leaf Light v2
- Kvaser Mini PCI Express HS
+ - Kvaser Mini PCI Express 2xHS
+ - Kvaser USBcan Light 2xHS
- Kvaser USBcan II HS/HS
- Kvaser USBcan II HS/LS
- Kvaser USBcan Rugged ("USBcan Rev B")
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 3400fd1ca..71f0e7913 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -521,7 +521,7 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* transmission complete interrupt */
netdev->stats.tx_packets++;
@@ -835,7 +835,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
stats->tx_dropped++;
}
} else {
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* Slow down tx path */
if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 113e64fcd..784a9002f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -480,7 +480,7 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
}
static ssize_t show_firmware(struct device *d,
@@ -820,7 +820,7 @@ static netdev_tx_t esd_usb2_start_xmit(struct sk_buff *skb,
goto releasebuf;
}
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/*
* Release our reference to this URB, the USB core will eventually free
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index cbc99d564..acb0c8490 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -1,7 +1,9 @@
-/* CAN driver for Geschwister Schneider USB/CAN devices.
+/* CAN driver for Geschwister Schneider USB/CAN devices
+ * and bytewerk.org candleLight USB CAN interfaces.
*
- * Copyright (C) 2013 Geschwister Schneider Technologie-,
+ * Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
* Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
+ * Copyright (C) 2016 Hubert Denkmair
*
* Many thanks to all socketcan devs!
*
@@ -29,6 +31,9 @@
#define USB_GSUSB_1_VENDOR_ID 0x1d50
#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_CANDLELIGHT_VENDOR_ID 0x1209
+#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
+
#define GSUSB_ENDPOINT_IN 1
#define GSUSB_ENDPOINT_OUT 2
@@ -950,7 +955,10 @@ static void gs_usb_disconnect(struct usb_interface *intf)
}
static const struct usb_device_id gs_usb_table[] = {
- {USB_DEVICE(USB_GSUSB_1_VENDOR_ID, USB_GSUSB_1_PRODUCT_ID)},
+ { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
+ USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
+ USB_CANDLELIGHT_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
@@ -968,5 +976,6 @@ module_usb_driver(gs_usb_driver);
MODULE_AUTHOR("Maximilian Schneider <mws@schneidersoft.net>");
MODULE_DESCRIPTION(
"Socket CAN device driver for Geschwister Schneider Technologie-, "
-"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces.");
+"Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n"
+"and bytewerk.org candleLight USB CAN interfaces.");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 022bfa13e..6f1f3b675 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -59,11 +59,14 @@
#define USB_CAN_R_PRODUCT_ID 39
#define USB_LEAF_LITE_V2_PRODUCT_ID 288
#define USB_MINI_PCIE_HS_PRODUCT_ID 289
+#define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 290
+#define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 291
+#define USB_MINI_PCIE_2HS_PRODUCT_ID 292
static inline bool kvaser_is_leaf(const struct usb_device_id *id)
{
return id->idProduct >= USB_LEAF_DEVEL_PRODUCT_ID &&
- id->idProduct <= USB_MINI_PCIE_HS_PRODUCT_ID;
+ id->idProduct <= USB_MINI_PCIE_2HS_PRODUCT_ID;
}
/* Kvaser USBCan-II devices */
@@ -537,6 +540,9 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = KVASER_HAS_TXRX_ERRORS },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID) },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID) },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID) },
/* USBCANII family IDs */
{ USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID),
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 5a2e341a6..bfb91d8fa 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -274,7 +274,7 @@ static void peak_usb_write_bulk_callback(struct urb *urb)
netdev->stats.tx_bytes += context->data_len;
/* prevent tx timeout */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
break;
default:
@@ -373,7 +373,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb,
stats->tx_dropped++;
}
} else {
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* slow down tx path */
if (atomic_read(&dev->active_tx_urbs) >= PCAN_USB_MAX_TX_URBS)
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 64c016a99..221f5f011 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1106,7 +1106,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev)
myNextTxDesc->skb = skb;
- dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+ netif_trans_update(dev); /* NETIF_F_LLTX driver :( */
e100_hardware_send_packet(np, buf, skb->len);
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 90ba003d8..200663c43 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -1,10 +1,6 @@
menu "Distributed Switch Architecture drivers"
depends on HAVE_NET_DSA
-config NET_DSA_MV88E6XXX
- tristate
- default n
-
config NET_DSA_MV88E6060
tristate "Marvell 88E6060 ethernet switch chip support"
depends on NET_DSA
@@ -13,46 +9,13 @@ config NET_DSA_MV88E6060
This enables support for the Marvell 88E6060 ethernet switch
chip.
-config NET_DSA_MV88E6XXX_NEED_PPU
- bool
- default n
-
-config NET_DSA_MV88E6131
- tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
- depends on NET_DSA
- select NET_DSA_MV88E6XXX
- select NET_DSA_MV88E6XXX_NEED_PPU
- select NET_DSA_TAG_DSA
- ---help---
- This enables support for the Marvell 88E6085/6095/6095F/6131
- ethernet switch chips.
-
-config NET_DSA_MV88E6123
- tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
- depends on NET_DSA
- select NET_DSA_MV88E6XXX
- select NET_DSA_TAG_EDSA
- ---help---
- This enables support for the Marvell 88E6123/6161/6165
- ethernet switch chips.
-
-config NET_DSA_MV88E6171
- tristate "Marvell 88E6171/6175/6350/6351 ethernet switch chip support"
- depends on NET_DSA
- select NET_DSA_MV88E6XXX
- select NET_DSA_TAG_EDSA
- ---help---
- This enables support for the Marvell 88E6171/6175/6350/6351
- ethernet switches chips.
-
-config NET_DSA_MV88E6352
- tristate "Marvell 88E6172/6176/6320/6321/6352 ethernet switch chip support"
+config NET_DSA_MV88E6XXX
+ tristate "Marvell 88E6xxx Ethernet switch chip support"
depends on NET_DSA
- select NET_DSA_MV88E6XXX
select NET_DSA_TAG_EDSA
---help---
- This enables support for the Marvell 88E6172, 88E6176, 88E6320,
- 88E6321 and 88E6352 ethernet switch chips.
+ This enables support for most of the Marvell 88E6xxx models of
+ Ethernet switch chips, except 88E6060.
config NET_DSA_BCM_SF2
tristate "Broadcom Starfighter 2 Ethernet switch support"
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index a6e09939b..76b751dd9 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,16 +1,3 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
-mv88e6xxx_drv-y += mv88e6xxx.o
-ifdef CONFIG_NET_DSA_MV88E6123
-mv88e6xxx_drv-y += mv88e6123.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6131
-mv88e6xxx_drv-y += mv88e6131.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6352
-mv88e6xxx_drv-y += mv88e6352.o
-endif
-ifdef CONFIG_NET_DSA_MV88E6171
-mv88e6xxx_drv-y += mv88e6171.o
-endif
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx.o
obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 95944d5e3..10ddd5a5d 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -135,8 +135,17 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
return BCM_SF2_STATS_SIZE;
}
-static char *bcm_sf2_sw_probe(struct device *host_dev, int sw_addr)
+static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev,
+ struct device *host_dev, int sw_addr,
+ void **_priv)
{
+ struct bcm_sf2_priv *priv;
+
+ priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+ *_priv = priv;
+
return "Broadcom Starfighter 2";
}
@@ -151,7 +160,7 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
* the same VLAN.
*/
for (i = 0; i < priv->hw_params.num_ports; i++) {
- if (!((1 << i) & ds->phys_port_mask))
+ if (!((1 << i) & ds->enabled_port_mask))
continue;
reg = core_readl(priv, CORE_PORT_VLAN_CTL_PORT(i));
@@ -545,12 +554,11 @@ static void bcm_sf2_sw_br_leave(struct dsa_switch *ds, int port)
priv->port_sts[port].bridge_dev = NULL;
}
-static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
- u8 state)
+static void bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
+ u8 state)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
u8 hw_state, cur_hw_state;
- int ret = 0;
u32 reg;
reg = core_readl(priv, CORE_G_PCTL_PORT(port));
@@ -574,7 +582,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
break;
default:
pr_err("%s: invalid STP state: %d\n", __func__, state);
- return -EINVAL;
+ return;
}
/* Fast-age ARL entries if we are moving a port from Learning or
@@ -584,10 +592,9 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
if (cur_hw_state != hw_state) {
if (cur_hw_state >= G_MISTP_LEARN_STATE &&
hw_state <= G_MISTP_LISTEN_STATE) {
- ret = bcm_sf2_sw_fast_age_port(ds, port);
- if (ret) {
+ if (bcm_sf2_sw_fast_age_port(ds, port)) {
pr_err("%s: fast-ageing failed\n", __func__);
- return ret;
+ return;
}
}
}
@@ -596,8 +603,6 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
reg &= ~(G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
reg |= hw_state;
core_writel(priv, reg, CORE_G_PCTL_PORT(port));
-
- return 0;
}
/* Address Resolution Logic routines */
@@ -728,13 +733,14 @@ static int bcm_sf2_sw_fdb_prepare(struct dsa_switch *ds, int port,
return 0;
}
-static int bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+static void bcm_sf2_sw_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
{
struct bcm_sf2_priv *priv = ds_to_priv(ds);
- return bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true);
+ if (bcm_sf2_arl_op(priv, 0, port, fdb->addr, fdb->vid, true))
+ pr_err("%s: failed to add MAC address\n", __func__);
}
static int bcm_sf2_sw_fdb_del(struct dsa_switch *ds, int port,
@@ -943,8 +949,8 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
/* All the interesting properties are at the parent device_node
* level
*/
- dn = ds->pd->of_node->parent;
- bcm_sf2_identify_ports(priv, ds->pd->of_node);
+ dn = ds->cd->of_node->parent;
+ bcm_sf2_identify_ports(priv, ds->cd->of_node);
priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
@@ -1003,7 +1009,7 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
/* Enable all valid ports and disable those unused */
for (port = 0; port < priv->hw_params.num_ports; port++) {
/* IMP port receives special treatment */
- if ((1 << port) & ds->phys_port_mask)
+ if ((1 << port) & ds->enabled_port_mask)
bcm_sf2_port_setup(ds, port, NULL);
else if (dsa_is_cpu_port(ds, port))
bcm_sf2_imp_setup(ds, port);
@@ -1016,11 +1022,12 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
* 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
* that we can use the regular SWITCH_MDIO master controller instead.
*
- * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
- * to have a 1:1 mapping between Port address and PHY address in order
- * to utilize the slave_mii_bus instance to read from Port PHYs. This is
- * not what we want here, so we initialize phys_mii_mask 0 to always
- * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
+ * By default, DSA initializes ds->phys_mii_mask to
+ * ds->enabled_port_mask to have a 1:1 mapping between Port address
+ * and PHY address in order to utilize the slave_mii_bus instance to
+ * read from Port PHYs. This is not what we want here, so we
+ * initialize phys_mii_mask to 0 to always utilize the "master" MDIO
+ * bus backed by the "mdio-unimac" driver.
*/
if (of_machine_is_compatible("brcm,bcm7445d0"))
ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
@@ -1278,7 +1285,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
* bcm_sf2_sw_setup
*/
for (port = 0; port < DSA_MAX_PORTS; port++) {
- if ((1 << port) & ds->phys_port_mask ||
+ if ((1 << port) & ds->enabled_port_mask ||
dsa_is_cpu_port(ds, port))
bcm_sf2_port_disable(ds, port, NULL);
}
@@ -1302,7 +1309,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
bcm_sf2_gphy_enable_set(ds, true);
for (port = 0; port < DSA_MAX_PORTS; port++) {
- if ((1 << port) & ds->phys_port_mask)
+ if ((1 << port) & ds->enabled_port_mask)
bcm_sf2_port_setup(ds, port, NULL);
else if (dsa_is_cpu_port(ds, port))
bcm_sf2_imp_setup(ds, port);
@@ -1365,8 +1372,7 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
static struct dsa_switch_driver bcm_sf2_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_BRCM,
- .priv_size = sizeof(struct bcm_sf2_priv),
- .probe = bcm_sf2_sw_probe,
+ .probe = bcm_sf2_sw_drv_probe,
.setup = bcm_sf2_sw_setup,
.set_addr = bcm_sf2_sw_set_addr,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
@@ -1387,7 +1393,7 @@ static struct dsa_switch_driver bcm_sf2_switch_driver = {
.set_eee = bcm_sf2_sw_set_eee,
.port_bridge_join = bcm_sf2_sw_br_join,
.port_bridge_leave = bcm_sf2_sw_br_leave,
- .port_stp_update = bcm_sf2_sw_br_set_stp_state,
+ .port_stp_state_set = bcm_sf2_sw_br_set_stp_state,
.port_fdb_prepare = bcm_sf2_sw_fdb_prepare,
.port_fdb_add = bcm_sf2_sw_fdb_add,
.port_fdb_del = bcm_sf2_sw_fdb_del,
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 0527f485c..e36b40886 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -19,12 +19,9 @@
static int reg_read(struct dsa_switch *ds, int addr, int reg)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
+ struct mv88e6060_priv *priv = ds_to_priv(ds);
- if (bus == NULL)
- return -EINVAL;
-
- return mdiobus_read_nested(bus, ds->pd->sw_addr + addr, reg);
+ return mdiobus_read_nested(priv->bus, priv->sw_addr + addr, reg);
}
#define REG_READ(addr, reg) \
@@ -40,12 +37,9 @@ static int reg_read(struct dsa_switch *ds, int addr, int reg)
static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
-
- if (bus == NULL)
- return -EINVAL;
+ struct mv88e6060_priv *priv = ds_to_priv(ds);
- return mdiobus_write_nested(bus, ds->pd->sw_addr + addr, reg, val);
+ return mdiobus_write_nested(priv->bus, priv->sw_addr + addr, reg, val);
}
#define REG_WRITE(addr, reg, val) \
@@ -57,14 +51,10 @@ static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
return __ret; \
})
-static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
+static const char *mv88e6060_get_name(struct mii_bus *bus, int sw_addr)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
int ret;
- if (bus == NULL)
- return NULL;
-
ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
if (ret >= 0) {
if (ret == PORT_SWITCH_ID_6060)
@@ -79,6 +69,27 @@ static char *mv88e6060_probe(struct device *host_dev, int sw_addr)
return NULL;
}
+static const char *mv88e6060_drv_probe(struct device *dsa_dev,
+ struct device *host_dev, int sw_addr,
+ void **_priv)
+{
+ struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
+ struct mv88e6060_priv *priv;
+ const char *name;
+
+ name = mv88e6060_get_name(bus, sw_addr);
+ if (name) {
+ priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return NULL;
+ *_priv = priv;
+ priv->bus = bus;
+ priv->sw_addr = sw_addr;
+ }
+
+ return name;
+}
+
static int mv88e6060_switch_reset(struct dsa_switch *ds)
{
int i;
@@ -159,7 +170,7 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
REG_WRITE(addr, PORT_VLAN_MAP,
((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
(dsa_is_cpu_port(ds, p) ?
- ds->phys_port_mask :
+ ds->enabled_port_mask :
BIT(ds->dst->cpu_port)));
/* Port Association Vector: when learning source addresses
@@ -174,8 +185,8 @@ static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
static int mv88e6060_setup(struct dsa_switch *ds)
{
- int i;
int ret;
+ int i;
ret = mv88e6060_switch_reset(ds);
if (ret < 0)
@@ -238,7 +249,7 @@ mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
static struct dsa_switch_driver mv88e6060_switch_driver = {
.tag_protocol = DSA_TAG_PROTO_TRAILER,
- .probe = mv88e6060_probe,
+ .probe = mv88e6060_drv_probe,
.setup = mv88e6060_setup,
.set_addr = mv88e6060_set_addr,
.phy_read = mv88e6060_phy_read,
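Both converted drivers (bcm_sf2 above, mv88e6060 here) now follow the same DSA probe contract: allocate private state with devm_kzalloc(), hand it back through the new **_priv argument, and return a model name, or NULL when the device is absent or allocation fails. A hedged template of that shape; the example_priv naming is hypothetical:

    static const char *example_drv_probe(struct device *dsa_dev,
                                         struct device *host_dev, int sw_addr,
                                         void **_priv)
    {
            struct example_priv *priv; /* hypothetical driver state */

            priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
            if (!priv)
                    return NULL;
            *_priv = priv;

            return "Example Switch";
    }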
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
index cc9b2ed4a..10249bd16 100644
--- a/drivers/net/dsa/mv88e6060.h
+++ b/drivers/net/dsa/mv88e6060.h
@@ -108,4 +108,15 @@
#define GLOBAL_ATU_MAC_23 0x0e
#define GLOBAL_ATU_MAC_45 0x0f
+struct mv88e6060_priv {
+ /* MDIO bus and address on bus to use. When in single chip
+ * mode, address is 0, and the switch uses multiple addresses
+ * on the bus. When in multi-chip mode, the switch uses a
+ * single address which contains two registers used for
+ * indirect access to more registers.
+ */
+ struct mii_bus *bus;
+ int sw_addr;
+};
+
#endif
diff --git a/drivers/net/dsa/mv88e6123.c b/drivers/net/dsa/mv88e6123.c
deleted file mode 100644
index 69a6f79dc..000000000
--- a/drivers/net/dsa/mv88e6123.c
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6123_table[] = {
- { PORT_SWITCH_ID_6123, "Marvell 88E6123" },
- { PORT_SWITCH_ID_6123_A1, "Marvell 88E6123 (A1)" },
- { PORT_SWITCH_ID_6123_A2, "Marvell 88E6123 (A2)" },
- { PORT_SWITCH_ID_6161, "Marvell 88E6161" },
- { PORT_SWITCH_ID_6161_A1, "Marvell 88E6161 (A1)" },
- { PORT_SWITCH_ID_6161_A2, "Marvell 88E6161 (A2)" },
- { PORT_SWITCH_ID_6165, "Marvell 88E6165" },
- { PORT_SWITCH_ID_6165_A1, "Marvell 88E6165 (A1)" },
- { PORT_SWITCH_ID_6165_A2, "Marvell 88e6165 (A2)" },
-};
-
-static char *mv88e6123_probe(struct device *host_dev, int sw_addr)
-{
- return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6123_table,
- ARRAY_SIZE(mv88e6123_table));
-}
-
-static int mv88e6123_setup_global(struct dsa_switch *ds)
-{
- u32 upstream_port = dsa_upstream_port(ds);
- int ret;
- u32 reg;
-
- ret = mv88e6xxx_setup_global(ds);
- if (ret)
- return ret;
-
- /* Disable the PHY polling unit (since there won't be any
- * external PHYs to poll), don't discard packets with
- * excessive collisions, and mask all interrupt sources.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, 0x0000);
-
- /* Configure the upstream port, and configure the upstream
- * port as the port to which ingress and egress monitor frames
- * are to be sent.
- */
- reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
- REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
- /* Disable remote management for now, and set the switch's
- * DSA device number.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
-
- return 0;
-}
-
-static int mv88e6123_setup(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- ret = mv88e6xxx_setup_common(ds);
- if (ret < 0)
- return ret;
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6123:
- ps->num_ports = 3;
- break;
- case PORT_SWITCH_ID_6161:
- case PORT_SWITCH_ID_6165:
- ps->num_ports = 6;
- break;
- default:
- return -ENODEV;
- }
-
- ret = mv88e6xxx_switch_reset(ds, false);
- if (ret < 0)
- return ret;
-
- ret = mv88e6123_setup_global(ds);
- if (ret < 0)
- return ret;
-
- return mv88e6xxx_setup_ports(ds);
-}
-
-struct dsa_switch_driver mv88e6123_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_EDSA,
- .priv_size = sizeof(struct mv88e6xxx_priv_state),
- .probe = mv88e6123_probe,
- .setup = mv88e6123_setup,
- .set_addr = mv88e6xxx_set_addr_indirect,
- .phy_read = mv88e6xxx_phy_read,
- .phy_write = mv88e6xxx_phy_write,
- .get_strings = mv88e6xxx_get_strings,
- .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
- .get_sset_count = mv88e6xxx_get_sset_count,
- .adjust_link = mv88e6xxx_adjust_link,
-#ifdef CONFIG_NET_DSA_HWMON
- .get_temp = mv88e6xxx_get_temp,
-#endif
- .get_regs_len = mv88e6xxx_get_regs_len,
- .get_regs = mv88e6xxx_get_regs,
-};
-
-MODULE_ALIAS("platform:mv88e6123");
-MODULE_ALIAS("platform:mv88e6161");
-MODULE_ALIAS("platform:mv88e6165");
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
deleted file mode 100644
index a92ca651c..000000000
--- a/drivers/net/dsa/mv88e6131.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * net/dsa/mv88e6131.c - Marvell 88e6095/6095f/6131 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6131_table[] = {
- { PORT_SWITCH_ID_6085, "Marvell 88E6085" },
- { PORT_SWITCH_ID_6095, "Marvell 88E6095/88E6095F" },
- { PORT_SWITCH_ID_6131, "Marvell 88E6131" },
- { PORT_SWITCH_ID_6131_B2, "Marvell 88E6131 (B2)" },
- { PORT_SWITCH_ID_6185, "Marvell 88E6185" },
-};
-
-static char *mv88e6131_probe(struct device *host_dev, int sw_addr)
-{
- return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6131_table,
- ARRAY_SIZE(mv88e6131_table));
-}
-
-static int mv88e6131_setup_global(struct dsa_switch *ds)
-{
- u32 upstream_port = dsa_upstream_port(ds);
- int ret;
- u32 reg;
-
- ret = mv88e6xxx_setup_global(ds);
- if (ret)
- return ret;
-
- /* Enable the PHY polling unit, don't discard packets with
- * excessive collisions, use a weighted fair queueing scheme
- * to arbitrate between packet queues, set the maximum frame
- * size to 1632, and mask all interrupt sources.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
- GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_MAX_FRAME_1632);
-
- /* Set the VLAN ethertype to 0x8100. */
- REG_WRITE(REG_GLOBAL, GLOBAL_CORE_TAG_TYPE, 0x8100);
-
- /* Disable ARP mirroring, and configure the upstream port as
- * the port to which ingress and egress monitor frames are to
- * be sent.
- */
- reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
- GLOBAL_MONITOR_CONTROL_ARP_DISABLED;
- REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
- /* Disable cascade port functionality unless this device
- * is used in a cascade configuration, and set the switch's
- * DSA device number.
- */
- if (ds->dst->pd->nr_chips > 1)
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
- GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
- (ds->index & 0x1f));
- else
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2,
- GLOBAL_CONTROL_2_NO_CASCADE |
- (ds->index & 0x1f));
-
- /* Force the priority of IGMP/MLD snoop frames and ARP frames
- * to the highest setting.
- */
- REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
- GLOBAL2_PRIO_OVERRIDE_FORCE_SNOOP |
- 7 << GLOBAL2_PRIO_OVERRIDE_SNOOP_SHIFT |
- GLOBAL2_PRIO_OVERRIDE_FORCE_ARP |
- 7 << GLOBAL2_PRIO_OVERRIDE_ARP_SHIFT);
-
- return 0;
-}
-
-static int mv88e6131_setup(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- ret = mv88e6xxx_setup_common(ds);
- if (ret < 0)
- return ret;
-
- mv88e6xxx_ppu_state_init(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6085:
- case PORT_SWITCH_ID_6185:
- ps->num_ports = 10;
- break;
- case PORT_SWITCH_ID_6095:
- ps->num_ports = 11;
- break;
- case PORT_SWITCH_ID_6131:
- case PORT_SWITCH_ID_6131_B2:
- ps->num_ports = 8;
- break;
- default:
- return -ENODEV;
- }
-
- ret = mv88e6xxx_switch_reset(ds, false);
- if (ret < 0)
- return ret;
-
- ret = mv88e6131_setup_global(ds);
- if (ret < 0)
- return ret;
-
- return mv88e6xxx_setup_ports(ds);
-}
-
-static int mv88e6131_port_to_phy_addr(struct dsa_switch *ds, int port)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- if (port >= 0 && port < ps->num_ports)
- return port;
-
- return -EINVAL;
-}
-
-static int
-mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
-{
- int addr = mv88e6131_port_to_phy_addr(ds, port);
-
- if (addr < 0)
- return addr;
-
- return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
-}
-
-static int
-mv88e6131_phy_write(struct dsa_switch *ds,
- int port, int regnum, u16 val)
-{
- int addr = mv88e6131_port_to_phy_addr(ds, port);
-
- if (addr < 0)
- return addr;
-
- return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
-}
-
-struct dsa_switch_driver mv88e6131_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_DSA,
- .priv_size = sizeof(struct mv88e6xxx_priv_state),
- .probe = mv88e6131_probe,
- .setup = mv88e6131_setup,
- .set_addr = mv88e6xxx_set_addr_direct,
- .phy_read = mv88e6131_phy_read,
- .phy_write = mv88e6131_phy_write,
- .get_strings = mv88e6xxx_get_strings,
- .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
- .get_sset_count = mv88e6xxx_get_sset_count,
- .adjust_link = mv88e6xxx_adjust_link,
-};
-
-MODULE_ALIAS("platform:mv88e6085");
-MODULE_ALIAS("platform:mv88e6095");
-MODULE_ALIAS("platform:mv88e6095f");
-MODULE_ALIAS("platform:mv88e6131");
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
deleted file mode 100644
index c0164b98f..000000000
--- a/drivers/net/dsa/mv88e6171.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/* net/dsa/mv88e6171.c - Marvell 88e6171 switch chip support
- * Copyright (c) 2008-2009 Marvell Semiconductor
- * Copyright (c) 2014 Claudio Leite <leitec@staticky.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6171_table[] = {
- { PORT_SWITCH_ID_6171, "Marvell 88E6171" },
- { PORT_SWITCH_ID_6175, "Marvell 88E6175" },
- { PORT_SWITCH_ID_6350, "Marvell 88E6350" },
- { PORT_SWITCH_ID_6351, "Marvell 88E6351" },
-};
-
-static char *mv88e6171_probe(struct device *host_dev, int sw_addr)
-{
- return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6171_table,
- ARRAY_SIZE(mv88e6171_table));
-}
-
-static int mv88e6171_setup_global(struct dsa_switch *ds)
-{
- u32 upstream_port = dsa_upstream_port(ds);
- int ret;
- u32 reg;
-
- ret = mv88e6xxx_setup_global(ds);
- if (ret)
- return ret;
-
- /* Discard packets with excessive collisions, mask all
- * interrupt sources, enable PPU.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
- GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
-
- /* Configure the upstream port, and configure the upstream
- * port as the port to which ingress and egress monitor frames
- * are to be sent.
- */
- reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_MIRROR_SHIFT;
- REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
- /* Disable remote management for now, and set the switch's
- * DSA device number.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
-
- return 0;
-}
-
-static int mv88e6171_setup(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- ret = mv88e6xxx_setup_common(ds);
- if (ret < 0)
- return ret;
-
- ps->num_ports = 7;
-
- ret = mv88e6xxx_switch_reset(ds, true);
- if (ret < 0)
- return ret;
-
- ret = mv88e6171_setup_global(ds);
- if (ret < 0)
- return ret;
-
- return mv88e6xxx_setup_ports(ds);
-}
-
-struct dsa_switch_driver mv88e6171_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_EDSA,
- .priv_size = sizeof(struct mv88e6xxx_priv_state),
- .probe = mv88e6171_probe,
- .setup = mv88e6171_setup,
- .set_addr = mv88e6xxx_set_addr_indirect,
- .phy_read = mv88e6xxx_phy_read_indirect,
- .phy_write = mv88e6xxx_phy_write_indirect,
- .get_strings = mv88e6xxx_get_strings,
- .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
- .get_sset_count = mv88e6xxx_get_sset_count,
- .adjust_link = mv88e6xxx_adjust_link,
-#ifdef CONFIG_NET_DSA_HWMON
- .get_temp = mv88e6xxx_get_temp,
-#endif
- .get_regs_len = mv88e6xxx_get_regs_len,
- .get_regs = mv88e6xxx_get_regs,
- .port_bridge_join = mv88e6xxx_port_bridge_join,
- .port_bridge_leave = mv88e6xxx_port_bridge_leave,
- .port_stp_update = mv88e6xxx_port_stp_update,
- .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
- .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
- .port_vlan_add = mv88e6xxx_port_vlan_add,
- .port_vlan_del = mv88e6xxx_port_vlan_del,
- .port_vlan_dump = mv88e6xxx_port_vlan_dump,
- .port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
- .port_fdb_add = mv88e6xxx_port_fdb_add,
- .port_fdb_del = mv88e6xxx_port_fdb_del,
- .port_fdb_dump = mv88e6xxx_port_fdb_dump,
-};
-
-MODULE_ALIAS("platform:mv88e6171");
-MODULE_ALIAS("platform:mv88e6175");
-MODULE_ALIAS("platform:mv88e6350");
-MODULE_ALIAS("platform:mv88e6351");
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
deleted file mode 100644
index 5f528abc8..000000000
--- a/drivers/net/dsa/mv88e6352.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * net/dsa/mv88e6352.c - Marvell 88e6352 switch chip support
- *
- * Copyright (c) 2014 Guenter Roeck
- *
- * Derived from mv88e6123_61_65.c
- * Copyright (c) 2008-2009 Marvell Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/delay.h>
-#include <linux/jiffies.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/platform_device.h>
-#include <linux/phy.h>
-#include <net/dsa.h>
-#include "mv88e6xxx.h"
-
-static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
- { PORT_SWITCH_ID_6172, "Marvell 88E6172" },
- { PORT_SWITCH_ID_6176, "Marvell 88E6176" },
- { PORT_SWITCH_ID_6240, "Marvell 88E6240" },
- { PORT_SWITCH_ID_6320, "Marvell 88E6320" },
- { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" },
- { PORT_SWITCH_ID_6320_A2, "Marvell 88E6320 (A2)" },
- { PORT_SWITCH_ID_6321, "Marvell 88E6321" },
- { PORT_SWITCH_ID_6321_A1, "Marvell 88E6321 (A1)" },
- { PORT_SWITCH_ID_6321_A2, "Marvell 88E6321 (A2)" },
- { PORT_SWITCH_ID_6352, "Marvell 88E6352" },
- { PORT_SWITCH_ID_6352_A0, "Marvell 88E6352 (A0)" },
- { PORT_SWITCH_ID_6352_A1, "Marvell 88E6352 (A1)" },
-};
-
-static char *mv88e6352_probe(struct device *host_dev, int sw_addr)
-{
- return mv88e6xxx_lookup_name(host_dev, sw_addr, mv88e6352_table,
- ARRAY_SIZE(mv88e6352_table));
-}
-
-static int mv88e6352_setup_global(struct dsa_switch *ds)
-{
- u32 upstream_port = dsa_upstream_port(ds);
- int ret;
- u32 reg;
-
- ret = mv88e6xxx_setup_global(ds);
- if (ret)
- return ret;
-
- /* Discard packets with excessive collisions,
- * mask all interrupt sources, enable PPU (bit 14, undocumented).
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
- GLOBAL_CONTROL_PPU_ENABLE | GLOBAL_CONTROL_DISCARD_EXCESS);
-
- /* Configure the upstream port, and configure the upstream
- * port as the port to which ingress and egress monitor frames
- * are to be sent.
- */
- reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
- upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
- REG_WRITE(REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
-
- /* Disable remote management for now, and set the switch's
- * DSA device number.
- */
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL_2, ds->index & 0x1f);
-
- return 0;
-}
-
-static int mv88e6352_setup(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- ret = mv88e6xxx_setup_common(ds);
- if (ret < 0)
- return ret;
-
- ps->num_ports = 7;
-
- mutex_init(&ps->eeprom_mutex);
-
- ret = mv88e6xxx_switch_reset(ds, true);
- if (ret < 0)
- return ret;
-
- ret = mv88e6352_setup_global(ds);
- if (ret < 0)
- return ret;
-
- return mv88e6xxx_setup_ports(ds);
-}
-
-static int mv88e6352_read_eeprom_word(struct dsa_switch *ds, int addr)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- mutex_lock(&ps->eeprom_mutex);
-
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
- GLOBAL2_EEPROM_OP_READ |
- (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
- if (ret < 0)
- goto error;
-
- ret = mv88e6xxx_eeprom_busy_wait(ds);
- if (ret < 0)
- goto error;
-
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
-error:
- mutex_unlock(&ps->eeprom_mutex);
- return ret;
-}
-
-static int mv88e6352_get_eeprom(struct dsa_switch *ds,
- struct ethtool_eeprom *eeprom, u8 *data)
-{
- int offset;
- int len;
- int ret;
-
- offset = eeprom->offset;
- len = eeprom->len;
- eeprom->len = 0;
-
- eeprom->magic = 0xc3ec4951;
-
- ret = mv88e6xxx_eeprom_load_wait(ds);
- if (ret < 0)
- return ret;
-
- if (offset & 1) {
- int word;
-
- word = mv88e6352_read_eeprom_word(ds, offset >> 1);
- if (word < 0)
- return word;
-
- *data++ = (word >> 8) & 0xff;
-
- offset++;
- len--;
- eeprom->len++;
- }
-
- while (len >= 2) {
- int word;
-
- word = mv88e6352_read_eeprom_word(ds, offset >> 1);
- if (word < 0)
- return word;
-
- *data++ = word & 0xff;
- *data++ = (word >> 8) & 0xff;
-
- offset += 2;
- len -= 2;
- eeprom->len += 2;
- }
-
- if (len) {
- int word;
-
- word = mv88e6352_read_eeprom_word(ds, offset >> 1);
- if (word < 0)
- return word;
-
- *data++ = word & 0xff;
-
- offset++;
- len--;
- eeprom->len++;
- }
-
- return 0;
-}
-
-static int mv88e6352_eeprom_is_readonly(struct dsa_switch *ds)
-{
- int ret;
-
- ret = mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
- if (ret < 0)
- return ret;
-
- if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
- return -EROFS;
-
- return 0;
-}
-
-static int mv88e6352_write_eeprom_word(struct dsa_switch *ds, int addr,
- u16 data)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
-
- mutex_lock(&ps->eeprom_mutex);
-
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
- if (ret < 0)
- goto error;
-
- ret = mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
- GLOBAL2_EEPROM_OP_WRITE |
- (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
- if (ret < 0)
- goto error;
-
- ret = mv88e6xxx_eeprom_busy_wait(ds);
-error:
- mutex_unlock(&ps->eeprom_mutex);
- return ret;
-}
-
-static int mv88e6352_set_eeprom(struct dsa_switch *ds,
- struct ethtool_eeprom *eeprom, u8 *data)
-{
- int offset;
- int ret;
- int len;
-
- if (eeprom->magic != 0xc3ec4951)
- return -EINVAL;
-
- ret = mv88e6352_eeprom_is_readonly(ds);
- if (ret)
- return ret;
-
- offset = eeprom->offset;
- len = eeprom->len;
- eeprom->len = 0;
-
- ret = mv88e6xxx_eeprom_load_wait(ds);
- if (ret < 0)
- return ret;
-
- if (offset & 1) {
- int word;
-
- word = mv88e6352_read_eeprom_word(ds, offset >> 1);
- if (word < 0)
- return word;
-
- word = (*data++ << 8) | (word & 0xff);
-
- ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
- if (ret < 0)
- return ret;
-
- offset++;
- len--;
- eeprom->len++;
- }
-
- while (len >= 2) {
- int word;
-
- word = *data++;
- word |= *data++ << 8;
-
- ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
- if (ret < 0)
- return ret;
-
- offset += 2;
- len -= 2;
- eeprom->len += 2;
- }
-
- if (len) {
- int word;
-
- word = mv88e6352_read_eeprom_word(ds, offset >> 1);
- if (word < 0)
- return word;
-
- word = (word & 0xff00) | *data++;
-
- ret = mv88e6352_write_eeprom_word(ds, offset >> 1, word);
- if (ret < 0)
- return ret;
-
- offset++;
- len--;
- eeprom->len++;
- }
-
- return 0;
-}
-
-struct dsa_switch_driver mv88e6352_switch_driver = {
- .tag_protocol = DSA_TAG_PROTO_EDSA,
- .priv_size = sizeof(struct mv88e6xxx_priv_state),
- .probe = mv88e6352_probe,
- .setup = mv88e6352_setup,
- .set_addr = mv88e6xxx_set_addr_indirect,
- .phy_read = mv88e6xxx_phy_read_indirect,
- .phy_write = mv88e6xxx_phy_write_indirect,
- .get_strings = mv88e6xxx_get_strings,
- .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
- .get_sset_count = mv88e6xxx_get_sset_count,
- .adjust_link = mv88e6xxx_adjust_link,
- .set_eee = mv88e6xxx_set_eee,
- .get_eee = mv88e6xxx_get_eee,
-#ifdef CONFIG_NET_DSA_HWMON
- .get_temp = mv88e6xxx_get_temp,
- .get_temp_limit = mv88e6xxx_get_temp_limit,
- .set_temp_limit = mv88e6xxx_set_temp_limit,
- .get_temp_alarm = mv88e6xxx_get_temp_alarm,
-#endif
- .get_eeprom = mv88e6352_get_eeprom,
- .set_eeprom = mv88e6352_set_eeprom,
- .get_regs_len = mv88e6xxx_get_regs_len,
- .get_regs = mv88e6xxx_get_regs,
- .port_bridge_join = mv88e6xxx_port_bridge_join,
- .port_bridge_leave = mv88e6xxx_port_bridge_leave,
- .port_stp_update = mv88e6xxx_port_stp_update,
- .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
- .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
- .port_vlan_add = mv88e6xxx_port_vlan_add,
- .port_vlan_del = mv88e6xxx_port_vlan_del,
- .port_vlan_dump = mv88e6xxx_port_vlan_dump,
- .port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
- .port_fdb_add = mv88e6xxx_port_fdb_add,
- .port_fdb_del = mv88e6xxx_port_fdb_del,
- .port_fdb_dump = mv88e6xxx_port_fdb_dump,
-};
-
-MODULE_ALIAS("platform:mv88e6172");
-MODULE_ALIAS("platform:mv88e6176");
-MODULE_ALIAS("platform:mv88e6320");
-MODULE_ALIAS("platform:mv88e6321");
-MODULE_ALIAS("platform:mv88e6352");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 5e572b351..ba9dfc942 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -5,6 +5,8 @@
* Copyright (c) 2015 CMC Electronics, Inc.
* Added support for VLAN Table Unit operations
*
+ * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -17,6 +19,7 @@
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
+#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/gpio/consumer.h>
@@ -25,12 +28,10 @@
#include <net/switchdev.h>
#include "mv88e6xxx.h"
-static void assert_smi_lock(struct dsa_switch *ds)
+static void assert_smi_lock(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
if (unlikely(!mutex_is_locked(&ps->smi_mutex))) {
- dev_err(ds->master_dev, "SMI lock not held!\n");
+ dev_err(ps->dev, "SMI lock not held!\n");
dump_stack();
}
}
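
This hunk converts assert_smi_lock() to take the private state directly. The underlying pattern: '_'-prefixed helpers assume the SMI mutex is held and only verify that in a debug check. A userspace sketch of the same guard; pthreads has no portable is-locked query, so ownership is tracked in a flag, which is an artifact of the sketch, not of the driver:

    /* Sketch of the assert-lock-held guard that assert_smi_lock() provides. */
    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct dev_state {
            pthread_mutex_t lock;
            bool locked;
    };

    static void dev_lock(struct dev_state *st)
    {
            pthread_mutex_lock(&st->lock);
            st->locked = true;
    }

    static void dev_unlock(struct dev_state *st)
    {
            st->locked = false;
            pthread_mutex_unlock(&st->lock);
    }

    /* call at the top of every '_'-prefixed helper */
    static void assert_dev_lock(struct dev_state *st)
    {
            assert(st->locked);   /* cf. assert_smi_lock() + dump_stack() */
    }
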
@@ -92,33 +93,29 @@ static int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr,
return ret & 0xffff;
}
-static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+static int _mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps,
+ int addr, int reg)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
int ret;
- assert_smi_lock(ds);
-
- if (bus == NULL)
- return -EINVAL;
+ assert_smi_lock(ps);
- ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
+ ret = __mv88e6xxx_reg_read(ps->bus, ps->sw_addr, addr, reg);
if (ret < 0)
return ret;
- dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ dev_dbg(ps->dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
addr, reg, ret);
return ret;
}
-int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+int mv88e6xxx_reg_read(struct mv88e6xxx_priv_state *ps, int addr, int reg)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_reg_read(ds, addr, reg);
+ ret = _mv88e6xxx_reg_read(ps, addr, reg);
mutex_unlock(&ps->smi_mutex);
return ret;
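
mv88e6xxx_reg_read() is now the locking wrapper around _mv88e6xxx_reg_read(), which expects smi_mutex held. The payoff is compound operations: take the mutex once, then run several unlocked accesses atomically. A sketch with a faked register file:

    /* Sketch of the locked/unlocked pairing around register access. */
    #include <pthread.h>

    struct priv {
            pthread_mutex_t smi_mutex;
            int regs[32];              /* stand-in for the SMI device */
    };

    /* caller must hold smi_mutex */
    static int _reg_read(struct priv *ps, int reg)
    {
            return ps->regs[reg & 31];
    }

    static void _reg_write(struct priv *ps, int reg, int val)
    {
            ps->regs[reg & 31] = val;
    }

    /* read-modify-write kept atomic w.r.t. other SMI users */
    static void reg_set_bits(struct priv *ps, int reg, int bits)
    {
            pthread_mutex_lock(&ps->smi_mutex);
            _reg_write(ps, reg, _reg_read(ps, reg) | bits);
            pthread_mutex_unlock(&ps->smi_mutex);
    }
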
@@ -156,58 +153,71 @@ static int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
return 0;
}
-static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
- u16 val)
+static int _mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
+ int reg, u16 val)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
-
- assert_smi_lock(ds);
+ assert_smi_lock(ps);
- if (bus == NULL)
- return -EINVAL;
-
- dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
+ dev_dbg(ps->dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
addr, reg, val);
- return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
+ return __mv88e6xxx_reg_write(ps->bus, ps->sw_addr, addr, reg, val);
}
-int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+int mv88e6xxx_reg_write(struct mv88e6xxx_priv_state *ps, int addr,
+ int reg, u16 val)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
+ ret = _mv88e6xxx_reg_write(ps, addr, reg, val);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
+static int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
- REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int err;
- return 0;
+ err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_01,
+ (addr[0] << 8) | addr[1]);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_23,
+ (addr[2] << 8) | addr[3]);
+ if (err)
+ return err;
+
+ return mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MAC_45,
+ (addr[4] << 8) | addr[5]);
}
-int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
+static int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
- int i;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
+ int i;
for (i = 0; i < 6; i++) {
int j;
/* Write the MAC address byte. */
- REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
- GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
+ ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
+ GLOBAL2_SWITCH_MAC_BUSY |
+ (i << 8) | addr[i]);
+ if (ret)
+ return ret;
/* Wait for the write to complete. */
for (j = 0; j < 16; j++) {
- ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
+ ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2,
+ GLOBAL2_SWITCH_MAC);
+ if (ret < 0)
+ return ret;
+
if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
break;
}
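
The indirect MAC-address load writes one byte at a time and then polls GLOBAL2_SWITCH_MAC for the busy bit, giving up after 16 reads. A sketch of that bounded poll; the busy-bit value and the fake device (busy for three polls) are assumptions of the sketch:

    /* Sketch of the bounded poll after each GLOBAL2_SWITCH_MAC write. */
    #define SWITCH_MAC_BUSY 0x8000     /* assumed bit position */

    static int fake_polls;

    static int read_switch_mac(void)
    {
            return (++fake_polls < 4) ? SWITCH_MAC_BUSY : 0;
    }

    static int wait_switch_mac_idle(void)
    {
            int i, ret;

            for (i = 0; i < 16; i++) {
                    ret = read_switch_mac();
                    if (ret < 0)
                            return ret;
                    if (!(ret & SWITCH_MAC_BUSY))
                            return 0;   /* byte latched */
            }
            return -1;                  /* still busy after 16 reads */
    }
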
@@ -218,34 +228,52 @@ int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
return 0;
}
-static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
+int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SWITCH_MAC))
+ return mv88e6xxx_set_addr_indirect(ds, addr);
+ else
+ return mv88e6xxx_set_addr_direct(ds, addr);
+}
+
+static int _mv88e6xxx_phy_read(struct mv88e6xxx_priv_state *ps, int addr,
+ int regnum)
{
if (addr >= 0)
- return _mv88e6xxx_reg_read(ds, addr, regnum);
+ return _mv88e6xxx_reg_read(ps, addr, regnum);
return 0xffff;
}
-static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
- u16 val)
+static int _mv88e6xxx_phy_write(struct mv88e6xxx_priv_state *ps, int addr,
+ int regnum, u16 val)
{
if (addr >= 0)
- return _mv88e6xxx_reg_write(ds, addr, regnum, val);
+ return _mv88e6xxx_reg_write(ps, addr, regnum, val);
return 0;
}
-#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
-static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_disable(struct mv88e6xxx_priv_state *ps)
{
int ret;
unsigned long timeout;
- ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
- ret & ~GLOBAL_CONTROL_PPU_ENABLE);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+ ret & ~GLOBAL_CONTROL_PPU_ENABLE);
+ if (ret)
+ return ret;
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+ if (ret < 0)
+ return ret;
+
usleep_range(1000, 2000);
if ((ret & GLOBAL_STATUS_PPU_MASK) !=
GLOBAL_STATUS_PPU_POLLING)
@@ -255,17 +283,26 @@ static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
return -ETIMEDOUT;
}
-static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_enable(struct mv88e6xxx_priv_state *ps)
{
- int ret;
+ int ret, err;
unsigned long timeout;
- ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
- REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
+ ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ err = mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL,
+ ret | GLOBAL_CONTROL_PPU_ENABLE);
+ if (err)
+ return err;
timeout = jiffies + 1 * HZ;
while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+ ret = mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATUS);
+ if (ret < 0)
+ return ret;
+
usleep_range(1000, 2000);
if ((ret & GLOBAL_STATUS_PPU_MASK) ==
GLOBAL_STATUS_PPU_POLLING)
@@ -281,9 +318,7 @@ static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
if (mutex_trylock(&ps->ppu_mutex)) {
- struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
-
- if (mv88e6xxx_ppu_enable(ds) == 0)
+ if (mv88e6xxx_ppu_enable(ps) == 0)
ps->ppu_disabled = 0;
mutex_unlock(&ps->ppu_mutex);
}
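
mv88e6xxx_ppu_reenable_work() uses mutex_trylock() so the deferred re-enable never blocks behind a PHY access that still owns the PPU; a contended run simply exits, and the next timer shot retries. A sketch of that try-lock-in-worker shape, with a stubbed hardware poke:

    /* Sketch of the try-lock-in-worker shape of mv88e6xxx_ppu_reenable_work(). */
    #include <pthread.h>
    #include <stdbool.h>

    struct ppu_state {
            pthread_mutex_t ppu_mutex;
            bool ppu_disabled;
    };

    static int ppu_enable(struct ppu_state *ps)
    {
            (void)ps;
            return 0;   /* stand-in for the GLOBAL_CONTROL poke */
    }

    static void ppu_reenable_work(struct ppu_state *ps)
    {
            if (pthread_mutex_trylock(&ps->ppu_mutex) != 0)
                    return;   /* a PHY op owns the PPU: retry later */

            if (ppu_enable(ps) == 0)
                    ps->ppu_disabled = false;

            pthread_mutex_unlock(&ps->ppu_mutex);
    }
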
@@ -296,9 +331,8 @@ static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
schedule_work(&ps->ppu_work);
}
-static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
+static int mv88e6xxx_ppu_access_get(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->ppu_mutex);
@@ -309,7 +343,7 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
* it.
*/
if (!ps->ppu_disabled) {
- ret = mv88e6xxx_ppu_disable(ds);
+ ret = mv88e6xxx_ppu_disable(ps);
if (ret < 0) {
mutex_unlock(&ps->ppu_mutex);
return ret;
@@ -323,19 +357,15 @@ static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
return ret;
}
-static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
+static void mv88e6xxx_ppu_access_put(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
/* Schedule a timer to re-enable the PHY polling unit. */
mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
mutex_unlock(&ps->ppu_mutex);
}
-void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
+void mv88e6xxx_ppu_state_init(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
mutex_init(&ps->ppu_mutex);
INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
init_timer(&ps->ppu_timer);
@@ -343,142 +373,86 @@ void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}
-int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
+static int mv88e6xxx_phy_read_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+ int regnum)
{
int ret;
- ret = mv88e6xxx_ppu_access_get(ds);
+ ret = mv88e6xxx_ppu_access_get(ps);
if (ret >= 0) {
- ret = mv88e6xxx_reg_read(ds, addr, regnum);
- mv88e6xxx_ppu_access_put(ds);
+ ret = _mv88e6xxx_reg_read(ps, addr, regnum);
+ mv88e6xxx_ppu_access_put(ps);
}
return ret;
}
-int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
- int regnum, u16 val)
+static int mv88e6xxx_phy_write_ppu(struct mv88e6xxx_priv_state *ps, int addr,
+ int regnum, u16 val)
{
int ret;
- ret = mv88e6xxx_ppu_access_get(ds);
+ ret = mv88e6xxx_ppu_access_get(ps);
if (ret >= 0) {
- ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
- mv88e6xxx_ppu_access_put(ds);
+ ret = _mv88e6xxx_reg_write(ps, addr, regnum, val);
+ mv88e6xxx_ppu_access_put(ps);
}
return ret;
}
-#endif
-static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6065_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6031:
- case PORT_SWITCH_ID_6061:
- case PORT_SWITCH_ID_6035:
- case PORT_SWITCH_ID_6065:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6065;
}
-static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6095_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6092:
- case PORT_SWITCH_ID_6095:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6095;
}
-static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6097_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6046:
- case PORT_SWITCH_ID_6085:
- case PORT_SWITCH_ID_6096:
- case PORT_SWITCH_ID_6097:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6097;
}
-static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6165_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6123:
- case PORT_SWITCH_ID_6161:
- case PORT_SWITCH_ID_6165:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6165;
}
-static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6185_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6121:
- case PORT_SWITCH_ID_6122:
- case PORT_SWITCH_ID_6152:
- case PORT_SWITCH_ID_6155:
- case PORT_SWITCH_ID_6182:
- case PORT_SWITCH_ID_6185:
- case PORT_SWITCH_ID_6108:
- case PORT_SWITCH_ID_6131:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6185;
}
-static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6320_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- switch (ps->id) {
- case PORT_SWITCH_ID_6320:
- case PORT_SWITCH_ID_6321:
- return true;
- }
- return false;
+ return ps->info->family == MV88E6XXX_FAMILY_6320;
}
-static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
+static bool mv88e6xxx_6351_family(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ return ps->info->family == MV88E6XXX_FAMILY_6351;
+}
- switch (ps->id) {
- case PORT_SWITCH_ID_6171:
- case PORT_SWITCH_ID_6175:
- case PORT_SWITCH_ID_6350:
- case PORT_SWITCH_ID_6351:
- return true;
- }
- return false;
+static bool mv88e6xxx_6352_family(struct mv88e6xxx_priv_state *ps)
+{
+ return ps->info->family == MV88E6XXX_FAMILY_6352;
}
-static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
+static unsigned int mv88e6xxx_num_databases(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ return ps->info->num_databases;
+}
- switch (ps->id) {
- case PORT_SWITCH_ID_6172:
- case PORT_SWITCH_ID_6176:
- case PORT_SWITCH_ID_6240:
- case PORT_SWITCH_ID_6352:
+static bool mv88e6xxx_has_fid_reg(struct mv88e6xxx_priv_state *ps)
+{
+ /* Does the device have dedicated FID registers for ATU and VTU ops? */
+ if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
+ mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps))
return true;
- }
+
return false;
}
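
This hunk replaces the switch-on-product-ID family helpers with one comparison against ps->info->family, so chip capabilities become data rather than code. A sketch of the resulting table-driven predicate; enum and field names abbreviate the patch's:

    /* Sketch of the info-table pattern the hunk introduces. */
    enum family { FAM_6097, FAM_6165, FAM_6185, FAM_6351, FAM_6352 };

    struct chip_info {
            enum family family;
            unsigned int num_databases;
            unsigned int num_ports;
    };

    struct priv {
            const struct chip_info *info;
    };

    static int has_fid_reg(const struct priv *ps)
    {
            switch (ps->info->family) {
            case FAM_6097:
            case FAM_6165:
            case FAM_6351:
            case FAM_6352:
                    return 1;   /* dedicated ATU/VTU FID registers */
            default:
                    return 0;
            }
    }
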
@@ -486,8 +460,8 @@ static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
* phy. However, in the case of a fixed link phy, we force the port
* settings from the fixed link settings.
*/
-void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u32 reg;
@@ -498,7 +472,7 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
if (ret < 0)
goto out;
@@ -512,7 +486,7 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
if (phydev->link)
reg |= PORT_PCS_CTRL_LINK_UP;
- if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
+ if (mv88e6xxx_6065_family(ps) && phydev->speed > SPEED_100)
goto out;
switch (phydev->speed) {
@@ -534,8 +508,8 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
if (phydev->duplex == DUPLEX_FULL)
reg |= PORT_PCS_CTRL_DUPLEX_FULL;
- if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
- (port >= ps->num_ports - 2)) {
+ if ((mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps)) &&
+ (port >= ps->info->num_ports - 2)) {
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
@@ -544,19 +518,19 @@ void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
}
- _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
+ _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_PCS_CTRL, reg);
out:
mutex_unlock(&ps->smi_mutex);
}
-static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_stats_wait(struct mv88e6xxx_priv_state *ps)
{
int ret;
int i;
for (i = 0; i < 10; i++) {
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_OP);
if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
return 0;
}
@@ -564,52 +538,54 @@ static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
return -ETIMEDOUT;
}
-static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+static int _mv88e6xxx_stats_snapshot(struct mv88e6xxx_priv_state *ps,
+ int port)
{
int ret;
- if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+ if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
port = (port + 1) << 5;
/* Snapshot the hardware statistics counters for this port. */
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
GLOBAL_STATS_OP_CAPTURE_PORT |
GLOBAL_STATS_OP_HIST_RX_TX | port);
if (ret < 0)
return ret;
/* Wait for the snapshotting to complete. */
- ret = _mv88e6xxx_stats_wait(ds);
+ ret = _mv88e6xxx_stats_wait(ps);
if (ret < 0)
return ret;
return 0;
}
-static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+static void _mv88e6xxx_stats_read(struct mv88e6xxx_priv_state *ps,
+ int stat, u32 *val)
{
u32 _val;
int ret;
*val = 0;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
GLOBAL_STATS_OP_READ_CAPTURED |
GLOBAL_STATS_OP_HIST_RX_TX | stat);
if (ret < 0)
return;
- ret = _mv88e6xxx_stats_wait(ds);
+ ret = _mv88e6xxx_stats_wait(ps);
if (ret < 0)
return;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
if (ret < 0)
return;
_val = ret << 16;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
if (ret < 0)
return;
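
_mv88e6xxx_stats_read() assembles each 32-bit counter from two 16-bit reads: GLOBAL_STATS_COUNTER_32 supplies the high half, GLOBAL_STATS_COUNTER_01 the low half. A sketch with stubbed reads:

    /* Sketch of the two-register counter assembly in _mv88e6xxx_stats_read(). */
    #include <stdint.h>

    static int read_counter_hi(void) { return 0x0001; }   /* COUNTER_32 stub */
    static int read_counter_lo(void) { return 0x86a0; }   /* COUNTER_01 stub */

    static int read_stat(uint32_t *val)
    {
            int hi = read_counter_hi();
            int lo = read_counter_lo();

            if (hi < 0 || lo < 0)
                    return -1;

            *val = ((uint32_t)hi << 16) | (uint32_t)lo;   /* 0x000186a0 = 100000 */
            return 0;
    }
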
@@ -678,26 +654,26 @@ static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
{ "out_management", 4, 0x1f | GLOBAL_STATS_OP_BANK_1, BANK1, },
};
-static bool mv88e6xxx_has_stat(struct dsa_switch *ds,
+static bool mv88e6xxx_has_stat(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_hw_stat *stat)
{
switch (stat->type) {
case BANK0:
return true;
case BANK1:
- return mv88e6xxx_6320_family(ds);
+ return mv88e6xxx_6320_family(ps);
case PORT:
- return mv88e6xxx_6095_family(ds) ||
- mv88e6xxx_6185_family(ds) ||
- mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6165_family(ds) ||
- mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6352_family(ds);
+ return mv88e6xxx_6095_family(ps) ||
+ mv88e6xxx_6185_family(ps) ||
+ mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6165_family(ps) ||
+ mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6352_family(ps);
}
return false;
}
-static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
+static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_hw_stat *s,
int port)
{
@@ -708,13 +684,13 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
switch (s->type) {
case PORT:
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), s->reg);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), s->reg);
if (ret < 0)
return UINT64_MAX;
low = ret;
if (s->sizeof_stat == 4) {
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port),
s->reg + 1);
if (ret < 0)
return UINT64_MAX;
@@ -723,22 +699,24 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
break;
case BANK0:
case BANK1:
- _mv88e6xxx_stats_read(ds, s->reg, &low);
+ _mv88e6xxx_stats_read(ps, s->reg, &low);
if (s->sizeof_stat == 8)
- _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+ _mv88e6xxx_stats_read(ps, s->reg + 1, &high);
}
value = (((u64)high) << 16) | low;
return value;
}
-void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
+ uint8_t *data)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(ds, stat)) {
+ if (mv88e6xxx_has_stat(ps, stat)) {
memcpy(data + j * ETH_GSTRING_LEN, stat->string,
ETH_GSTRING_LEN);
j++;
@@ -746,22 +724,22 @@ void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
}
}
-int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_hw_stat *stat;
int i, j;
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(ds, stat))
+ if (mv88e6xxx_has_stat(ps, stat))
j++;
}
return j;
}
-void
-mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
- int port, uint64_t *data)
+static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
+ uint64_t *data)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_hw_stat *stat;
@@ -770,15 +748,15 @@ mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_stats_snapshot(ds, port);
+ ret = _mv88e6xxx_stats_snapshot(ps, port);
if (ret < 0) {
mutex_unlock(&ps->smi_mutex);
return;
}
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
- if (mv88e6xxx_has_stat(ds, stat)) {
- data[j] = _mv88e6xxx_get_ethtool_stat(ds, stat, port);
+ if (mv88e6xxx_has_stat(ps, stat)) {
+ data[j] = _mv88e6xxx_get_ethtool_stat(ps, stat, port);
j++;
}
}
@@ -786,14 +764,15 @@ mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
mutex_unlock(&ps->smi_mutex);
}
-int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
+static int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
return 32 * sizeof(u16);
}
-void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
- struct ethtool_regs *regs, void *_p)
+static void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
+ struct ethtool_regs *regs, void *_p)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u16 *p = _p;
int i;
@@ -801,16 +780,20 @@ void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
memset(p, 0xff, 32 * sizeof(u16));
+ mutex_lock(&ps->smi_mutex);
+
for (i = 0; i < 32; i++) {
int ret;
- ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), i);
if (ret >= 0)
p[i] = ret;
}
+
+ mutex_unlock(&ps->smi_mutex);
}
-static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
+static int _mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg, int offset,
u16 mask)
{
unsigned long timeout = jiffies + HZ / 10;
@@ -818,7 +801,7 @@ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
while (time_before(jiffies, timeout)) {
int ret;
- ret = _mv88e6xxx_reg_read(ds, reg, offset);
+ ret = _mv88e6xxx_reg_read(ps, reg, offset);
if (ret < 0)
return ret;
if (!(ret & mask))
@@ -829,91 +812,320 @@ static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
return -ETIMEDOUT;
}
-static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
+static int mv88e6xxx_wait(struct mv88e6xxx_priv_state *ps, int reg,
+ int offset, u16 mask)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_wait(ds, reg, offset, mask);
+ ret = _mv88e6xxx_wait(ps, reg, offset, mask);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_phy_wait(struct mv88e6xxx_priv_state *ps)
{
- return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ return _mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
GLOBAL2_SMI_OP_BUSY);
}
-int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
+static int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
{
- return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
GLOBAL2_EEPROM_OP_LOAD);
}
-int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
+static int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
{
- return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ return mv88e6xxx_wait(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
GLOBAL2_EEPROM_OP_BUSY);
}
-static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
+static int mv88e6xxx_read_eeprom_word(struct dsa_switch *ds, int addr)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->eeprom_mutex);
+
+ ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ GLOBAL2_EEPROM_OP_READ |
+ (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_eeprom_busy_wait(ds);
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA);
+error:
+ mutex_unlock(&ps->eeprom_mutex);
+ return ret;
+}
+
+static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+ return ps->eeprom_len;
+
+ return 0;
+}
+
+static int mv88e6xxx_get_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int offset;
+ int len;
+ int ret;
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+ return -EOPNOTSUPP;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ eeprom->magic = 0xc3ec4951;
+
+ ret = mv88e6xxx_eeprom_load_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ if (offset & 1) {
+ int word;
+
+ word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = (word >> 8) & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ int word;
+
+ word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = word & 0xff;
+ *data++ = (word >> 8) & 0xff;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ int word;
+
+ word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ *data++ = word & 0xff;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+static int mv88e6xxx_eeprom_is_readonly(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ ret = mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & GLOBAL2_EEPROM_OP_WRITE_EN))
+ return -EROFS;
+
+ return 0;
+}
+
+static int mv88e6xxx_write_eeprom_word(struct dsa_switch *ds, int addr,
+ u16 data)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int ret;
+
+ mutex_lock(&ps->eeprom_mutex);
+
+ ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_DATA, data);
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
+ GLOBAL2_EEPROM_OP_WRITE |
+ (addr & GLOBAL2_EEPROM_OP_ADDR_MASK));
+ if (ret < 0)
+ goto error;
+
+ ret = mv88e6xxx_eeprom_busy_wait(ds);
+error:
+ mutex_unlock(&ps->eeprom_mutex);
+ return ret;
+}
+
+static int mv88e6xxx_set_eeprom(struct dsa_switch *ds,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int offset;
+ int ret;
+ int len;
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+ return -EOPNOTSUPP;
+
+ if (eeprom->magic != 0xc3ec4951)
+ return -EINVAL;
+
+ ret = mv88e6xxx_eeprom_is_readonly(ds);
+ if (ret)
+ return ret;
+
+ offset = eeprom->offset;
+ len = eeprom->len;
+ eeprom->len = 0;
+
+ ret = mv88e6xxx_eeprom_load_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ if (offset & 1) {
+ int word;
+
+ word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ word = (*data++ << 8) | (word & 0xff);
+
+ ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ while (len >= 2) {
+ int word;
+
+ word = *data++;
+ word |= *data++ << 8;
+
+ ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset += 2;
+ len -= 2;
+ eeprom->len += 2;
+ }
+
+ if (len) {
+ int word;
+
+ word = mv88e6xxx_read_eeprom_word(ds, offset >> 1);
+ if (word < 0)
+ return word;
+
+ word = (word & 0xff00) | *data++;
+
+ ret = mv88e6xxx_write_eeprom_word(ds, offset >> 1, word);
+ if (ret < 0)
+ return ret;
+
+ offset++;
+ len--;
+ eeprom->len++;
+ }
+
+ return 0;
+}
+
+static int _mv88e6xxx_atu_wait(struct mv88e6xxx_priv_state *ps)
{
- return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
+ return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_ATU_OP,
GLOBAL_ATU_OP_BUSY);
}
-static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
- int regnum)
+static int _mv88e6xxx_phy_read_indirect(struct mv88e6xxx_priv_state *ps,
+ int addr, int regnum)
{
int ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
GLOBAL2_SMI_OP_22_READ | (addr << 5) |
regnum);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_phy_wait(ds);
+ ret = _mv88e6xxx_phy_wait(ps);
if (ret < 0)
return ret;
- return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA);
+
+ return ret;
}
-static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
- int regnum, u16 val)
+static int _mv88e6xxx_phy_write_indirect(struct mv88e6xxx_priv_state *ps,
+ int addr, int regnum, u16 val)
{
int ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SMI_OP,
GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
regnum);
- return _mv88e6xxx_phy_wait(ds);
+ return _mv88e6xxx_phy_wait(ps);
}
-int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
+static int mv88e6xxx_get_eee(struct dsa_switch *ds, int port,
+ struct ethtool_eee *e)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int reg;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+ reg = _mv88e6xxx_phy_read_indirect(ps, port, 16);
if (reg < 0)
goto out;
e->eee_enabled = !!(reg & 0x0200);
e->tx_lpi_enabled = !!(reg & 0x0100);
- reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+ reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
if (reg < 0)
goto out;
@@ -925,16 +1137,19 @@ out:
return reg;
}
-int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
- struct phy_device *phydev, struct ethtool_eee *e)
+static int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
+ struct phy_device *phydev, struct ethtool_eee *e)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int reg;
int ret;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEE))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
+ ret = _mv88e6xxx_phy_read_indirect(ps, port, 16);
if (ret < 0)
goto out;
@@ -944,25 +1159,45 @@ int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
if (e->tx_lpi_enabled)
reg |= 0x0100;
- ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
+ ret = _mv88e6xxx_phy_write_indirect(ps, port, 16, reg);
out:
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
+static int _mv88e6xxx_atu_cmd(struct mv88e6xxx_priv_state *ps, u16 fid, u16 cmd)
{
int ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
+ if (mv88e6xxx_has_fid_reg(ps)) {
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_FID, fid);
+ if (ret < 0)
+ return ret;
+ } else if (mv88e6xxx_num_databases(ps) == 256) {
+ /* ATU DBNum[7:4] are located in ATU Control 15:12 */
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL);
+ if (ret < 0)
+ return ret;
+
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ (ret & 0xfff) |
+ ((fid << 8) & 0xf000));
+ if (ret < 0)
+ return ret;
+
+ /* ATU DBNum[3:0] are located in ATU Operation 3:0 */
+ cmd |= fid & 0xf;
+ }
+
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
if (ret < 0)
return ret;
- return _mv88e6xxx_atu_wait(ds);
+ return _mv88e6xxx_atu_wait(ps);
}
-static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_data_write(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_atu_entry *entry)
{
u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;
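
_mv88e6xxx_atu_cmd() gains the FID plumbing in the hunk above: chips with a FID register get a plain write, while 256-database chips without one split the FID, bits 7:4 into ATU Control 15:12 and bits 3:0 into the operation word. A sketch of that split:

    /* Sketch of the DBNum split for 256-database chips without a FID reg. */
    #include <stdint.h>

    static void split_fid(uint16_t fid, uint16_t atu_ctrl,
                          uint16_t *new_ctrl, uint16_t *cmd)
    {
            /* (fid << 8) & 0xf000 moves bits 7:4 up to bits 15:12 */
            *new_ctrl = (atu_ctrl & 0x0fff) | ((fid << 8) & 0xf000);
            /* the low nibble rides along in the operation register */
            *cmd |= fid & 0xf;
    }

    /* fid 0xab: ATU Control gains 0xa000, the command word gains 0xb. */
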
@@ -982,30 +1217,25 @@ static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
data |= (entry->portv_trunkid << shift) & mask;
}
- return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
+ return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_DATA, data);
}
-static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_flush_move(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_atu_entry *entry,
bool static_too)
{
int op;
int err;
- err = _mv88e6xxx_atu_wait(ds);
+ err = _mv88e6xxx_atu_wait(ps);
if (err)
return err;
- err = _mv88e6xxx_atu_data_write(ds, entry);
+ err = _mv88e6xxx_atu_data_write(ps, entry);
if (err)
return err;
if (entry->fid) {
- err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
- entry->fid);
- if (err)
- return err;
-
op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
} else {
@@ -1013,21 +1243,22 @@ static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
}
- return _mv88e6xxx_atu_cmd(ds, op);
+ return _mv88e6xxx_atu_cmd(ps, entry->fid, op);
}
-static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
+static int _mv88e6xxx_atu_flush(struct mv88e6xxx_priv_state *ps,
+ u16 fid, bool static_too)
{
struct mv88e6xxx_atu_entry entry = {
.fid = fid,
.state = 0, /* EntryState bits must be 0 */
};
- return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
+ return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
}
-static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
- int to_port, bool static_too)
+static int _mv88e6xxx_atu_move(struct mv88e6xxx_priv_state *ps, u16 fid,
+ int from_port, int to_port, bool static_too)
{
struct mv88e6xxx_atu_entry entry = {
.trunk = false,
@@ -1041,14 +1272,14 @@ static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
entry.portv_trunkid = (to_port & 0x0f) << 4;
entry.portv_trunkid |= from_port & 0x0f;
- return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
+ return _mv88e6xxx_atu_flush_move(ps, &entry, static_too);
}
-static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
- bool static_too)
+static int _mv88e6xxx_atu_remove(struct mv88e6xxx_priv_state *ps, u16 fid,
+ int port, bool static_too)
{
/* Destination port 0xF means remove the entries */
- return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
+ return _mv88e6xxx_atu_move(ps, fid, port, 0x0f, static_too);
}
static const char * const mv88e6xxx_port_state_names[] = {
@@ -1058,12 +1289,14 @@ static const char * const mv88e6xxx_port_state_names[] = {
[PORT_CONTROL_STATE_FORWARDING] = "Forwarding",
};
-static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
+static int _mv88e6xxx_port_state(struct mv88e6xxx_priv_state *ps, int port,
+ u8 state)
{
+ struct dsa_switch *ds = ps->ds;
int reg, ret = 0;
u8 oldstate;
- reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
+ reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL);
if (reg < 0)
return reg;
@@ -1078,13 +1311,13 @@ static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
oldstate == PORT_CONTROL_STATE_FORWARDING)
&& (state == PORT_CONTROL_STATE_DISABLED ||
state == PORT_CONTROL_STATE_BLOCKING)) {
- ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
+ ret = _mv88e6xxx_atu_remove(ps, 0, port, false);
if (ret)
return ret;
}
reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL,
reg);
if (ret)
return ret;
@@ -1097,11 +1330,12 @@ static int _mv88e6xxx_port_state(struct dsa_switch *ds, int port, u8 state)
return ret;
}
-static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
+static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_priv_state *ps,
+ int port)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct net_device *bridge = ps->ports[port].bridge_dev;
- const u16 mask = (1 << ps->num_ports) - 1;
+ const u16 mask = (1 << ps->info->num_ports) - 1;
+ struct dsa_switch *ds = ps->ds;
u16 output_ports = 0;
int reg;
int i;
@@ -1110,7 +1344,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
output_ports = mask;
} else {
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
/* allow sending frames to every group member */
if (bridge && ps->ports[i].bridge_dev == bridge)
output_ports |= BIT(i);
@@ -1124,20 +1358,25 @@ static int _mv88e6xxx_port_based_vlan_map(struct dsa_switch *ds, int port)
/* prevent frames from going back out of the port they came in on */
output_ports &= ~BIT(port);
- reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+ reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
if (reg < 0)
return reg;
reg &= ~mask;
reg |= output_ports & mask;
- return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
+ return _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN, reg);
}
-int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
+static void mv88e6xxx_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int stp_state;
+ int err;
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_PORTSTATE))
+ return;
switch (state) {
case BR_STATE_DISABLED:
@@ -1156,23 +1395,23 @@ int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
break;
}
- /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
- * so we can not update the port state directly but need to schedule it.
- */
- ps->ports[port].state = stp_state;
- set_bit(port, ps->port_state_update_mask);
- schedule_work(&ps->bridge_work);
+ mutex_lock(&ps->smi_mutex);
+ err = _mv88e6xxx_port_state(ps, port, stp_state);
+ mutex_unlock(&ps->smi_mutex);
- return 0;
+ if (err)
+ netdev_err(ds->ports[port], "failed to update state to %s\n",
+ mv88e6xxx_port_state_names[stp_state]);
}
-static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
- u16 *old)
+static int _mv88e6xxx_port_pvid(struct mv88e6xxx_priv_state *ps, int port,
+ u16 *new, u16 *old)
{
+ struct dsa_switch *ds = ps->ds;
u16 pvid;
int ret;
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_DEFAULT_VLAN);
if (ret < 0)
return ret;
@@ -1182,7 +1421,7 @@ static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
ret &= ~PORT_DEFAULT_VLAN_MASK;
ret |= *new & PORT_DEFAULT_VLAN_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_DEFAULT_VLAN, ret);
if (ret < 0)
return ret;
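
_mv88e6xxx_port_pvid() doubles as getter and setter through its optional pointers: a non-NULL new triggers the read-modify-write, a non-NULL old reports the VID that was in place before any update. A sketch of that shape against a fake register; the 0x0fff mask mirrors PORT_DEFAULT_VLAN_MASK:

    /* Sketch of the optional-pointer getter/setter of _mv88e6xxx_port_pvid(). */
    #include <stddef.h>
    #include <stdint.h>

    #define PVID_MASK 0x0fff

    static int fake_reg = 0x1005;   /* upper flag bits + VID 5 */

    static int port_pvid(const uint16_t *new, uint16_t *old)
    {
            uint16_t pvid = fake_reg & PVID_MASK;

            if (new)   /* set: read-modify-write, keep the flag bits */
                    fake_reg = (fake_reg & ~PVID_MASK) | (*new & PVID_MASK);

            if (old)   /* get: report the value before any update */
                    *old = pvid;

            return 0;
    }
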
@@ -1197,55 +1436,56 @@ static int _mv88e6xxx_port_pvid(struct dsa_switch *ds, int port, u16 *new,
return 0;
}
-static int _mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
+static int _mv88e6xxx_port_pvid_get(struct mv88e6xxx_priv_state *ps,
+ int port, u16 *pvid)
{
- return _mv88e6xxx_port_pvid(ds, port, NULL, pvid);
+ return _mv88e6xxx_port_pvid(ps, port, NULL, pvid);
}
-static int _mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
+static int _mv88e6xxx_port_pvid_set(struct mv88e6xxx_priv_state *ps,
+ int port, u16 pvid)
{
- return _mv88e6xxx_port_pvid(ds, port, &pvid, NULL);
+ return _mv88e6xxx_port_pvid(ps, port, &pvid, NULL);
}
-static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
+static int _mv88e6xxx_vtu_wait(struct mv88e6xxx_priv_state *ps)
{
- return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
+ return _mv88e6xxx_wait(ps, REG_GLOBAL, GLOBAL_VTU_OP,
GLOBAL_VTU_OP_BUSY);
}
-static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
+static int _mv88e6xxx_vtu_cmd(struct mv88e6xxx_priv_state *ps, u16 op)
{
int ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_OP, op);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_wait(ds);
+ return _mv88e6xxx_vtu_wait(ps);
}
-static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
+static int _mv88e6xxx_vtu_stu_flush(struct mv88e6xxx_priv_state *ps)
{
int ret;
- ret = _mv88e6xxx_vtu_wait(ds);
+ ret = _mv88e6xxx_vtu_wait(ps);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
+ return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_FLUSH_ALL);
}
-static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_stu_data_read(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_vtu_stu_entry *entry,
unsigned int nibble_offset)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u16 regs[3];
int i;
int ret;
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
GLOBAL_VTU_DATA_0_3 + i);
if (ret < 0)
return ret;
@@ -1253,7 +1493,7 @@ static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
regs[i] = ret;
}
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u16 reg = regs[i / 4];
@@ -1263,16 +1503,27 @@ static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
return 0;
}
-static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
+static int mv88e6xxx_vtu_data_read(struct mv88e6xxx_priv_state *ps,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_read(ps, entry, 0);
+}
+
+static int mv88e6xxx_stu_data_read(struct mv88e6xxx_priv_state *ps,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_read(ps, entry, 2);
+}
+
+static int _mv88e6xxx_vtu_stu_data_write(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_vtu_stu_entry *entry,
unsigned int nibble_offset)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u16 regs[3] = { 0 };
int i;
int ret;
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
unsigned int shift = (i % 4) * 4 + nibble_offset;
u8 data = entry->data[i];
@@ -1280,7 +1531,7 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
}
for (i = 0; i < 3; ++i) {
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL,
GLOBAL_VTU_DATA_0_3 + i, regs[i]);
if (ret < 0)
return ret;
@@ -1289,27 +1540,39 @@ static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
return 0;
}
-static int _mv88e6xxx_vtu_vid_write(struct dsa_switch *ds, u16 vid)
+static int mv88e6xxx_vtu_data_write(struct mv88e6xxx_priv_state *ps,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_write(ps, entry, 0);
+}
+
+static int mv88e6xxx_stu_data_write(struct mv88e6xxx_priv_state *ps,
+ struct mv88e6xxx_vtu_stu_entry *entry)
+{
+ return _mv88e6xxx_vtu_stu_data_write(ps, entry, 2);
+}
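
The four helpers above all funnel into the same packed layout: one 4-bit field per port, four ports per 16-bit data register, with VTU member tags in the low two bits of each nibble (offset 0) and STU port states in the high two (offset 2). A minimal standalone sketch of the addressing, in plain C outside the kernel and assuming the driver's 2-bit data mask:

#include <assert.h>
#include <stdint.h>

#define DATA_MASK 0x3	/* member tags and port states are 2-bit fields */

/* Port i uses the nibble at (i % 4) * 4 of regs[i / 4]; nibble_offset
 * picks the VTU half (0) or the STU half (2) of that nibble.
 */
static uint8_t vtu_stu_data_get(const uint16_t regs[3], int port,
				unsigned int nibble_offset)
{
	unsigned int shift = (port % 4) * 4 + nibble_offset;

	return (regs[port / 4] >> shift) & DATA_MASK;
}

int main(void)
{
	uint16_t regs[3] = { 0 };

	/* Hypothetical STU state 0x2 for port 5 lands in regs[1]. */
	regs[1] |= (uint16_t)0x2 << ((5 % 4) * 4 + 2);
	assert(vtu_stu_data_get(regs, 5, 2) == 0x2);
	return 0;
}
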
+
+static int _mv88e6xxx_vtu_vid_write(struct mv88e6xxx_priv_state *ps, u16 vid)
{
- return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
+ return _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID,
vid & GLOBAL_VTU_VID_MASK);
}
-static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_getnext(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_vtu_stu_entry *entry)
{
struct mv88e6xxx_vtu_stu_entry next = { 0 };
int ret;
- ret = _mv88e6xxx_vtu_wait(ds);
+ ret = _mv88e6xxx_vtu_wait(ps);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
+ ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_VTU_GET_NEXT);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
if (ret < 0)
return ret;
@@ -1317,20 +1580,32 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
+ ret = mv88e6xxx_vtu_data_read(ps, &next);
if (ret < 0)
return ret;
- if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
- mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ if (mv88e6xxx_has_fid_reg(ps)) {
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
GLOBAL_VTU_FID);
if (ret < 0)
return ret;
next.fid = ret & GLOBAL_VTU_FID_MASK;
+ } else if (mv88e6xxx_num_databases(ps) == 256) {
+ /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
+ * VTU DBNum[3:0] are located in VTU Operation 3:0
+ */
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
+ GLOBAL_VTU_OP);
+ if (ret < 0)
+ return ret;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ next.fid = (ret & 0xf00) >> 4;
+ next.fid |= ret & 0xf;
+ }
+
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
GLOBAL_VTU_SID);
if (ret < 0)
return ret;
@@ -1343,27 +1618,30 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds,
return 0;
}
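
For chips with 256 address databases, the FID that _mv88e6xxx_vtu_getnext() reads back is split across two fields of the VTU Operation register. A minimal pack/unpack sketch (plain C outside the kernel; bit positions taken from the driver comment above):

#include <assert.h>
#include <stdint.h>

/* DBNum[7:4] sit in VTU Operation bits 11:8, DBNum[3:0] in bits 3:0. */
static uint16_t vtu_op_pack_fid(uint16_t op, uint8_t fid)
{
	op |= (uint16_t)(fid & 0xf0) << 4;	/* FID bits 7:4 -> op bits 11:8 */
	op |= fid & 0x0f;			/* FID bits 3:0 -> op bits 3:0 */
	return op;
}

static uint8_t vtu_op_unpack_fid(uint16_t op)
{
	return ((op & 0xf00) >> 4) | (op & 0x0f);
}

int main(void)
{
	assert(vtu_op_unpack_fid(vtu_op_pack_fid(0, 0xa5)) == 0xa5);
	return 0;
}
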
-int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj))
+static int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_vlan *vlan,
+ int (*cb)(struct switchdev_obj *obj))
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_vtu_stu_entry next;
u16 pvid;
int err;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
+ err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
if (err)
goto unlock;
- err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+ err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
if (err)
goto unlock;
do {
- err = _mv88e6xxx_vtu_getnext(ds, &next);
+ err = _mv88e6xxx_vtu_getnext(ps, &next);
if (err)
break;
@@ -1394,13 +1672,14 @@ unlock:
return err;
}
-static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
+static int _mv88e6xxx_vtu_loadpurge(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_vtu_stu_entry *entry)
{
+ u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE;
u16 reg = 0;
int ret;
- ret = _mv88e6xxx_vtu_wait(ds);
+ ret = _mv88e6xxx_vtu_wait(ps);
if (ret < 0)
return ret;
@@ -1408,66 +1687,73 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
goto loadpurge;
/* Write port member tags */
- ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
+ ret = mv88e6xxx_vtu_data_write(ps, entry);
if (ret < 0)
return ret;
- if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
- mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_STU)) {
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
if (ret < 0)
return ret;
+ }
+ if (mv88e6xxx_has_fid_reg(ps)) {
reg = entry->fid & GLOBAL_VTU_FID_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_FID, reg);
if (ret < 0)
return ret;
+ } else if (mv88e6xxx_num_databases(ps) == 256) {
+ /* VTU DBNum[7:4] are located in VTU Operation 11:8, and
+ * VTU DBNum[3:0] are located in VTU Operation 3:0
+ */
+ op |= (entry->fid & 0xf0) << 4;
+ op |= entry->fid & 0xf;
}
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
reg |= entry->vid & GLOBAL_VTU_VID_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
+ return _mv88e6xxx_vtu_cmd(ps, op);
}
-static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
+static int _mv88e6xxx_stu_getnext(struct mv88e6xxx_priv_state *ps, u8 sid,
struct mv88e6xxx_vtu_stu_entry *entry)
{
struct mv88e6xxx_vtu_stu_entry next = { 0 };
int ret;
- ret = _mv88e6xxx_vtu_wait(ds);
+ ret = _mv88e6xxx_vtu_wait(ps);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID,
sid & GLOBAL_VTU_SID_MASK);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
+ ret = _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_GET_NEXT);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_SID);
if (ret < 0)
return ret;
next.sid = ret & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_VTU_VID);
if (ret < 0)
return ret;
next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
if (next.valid) {
- ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
+ ret = mv88e6xxx_stu_data_read(ps, &next);
if (ret < 0)
return ret;
}
@@ -1476,13 +1762,13 @@ static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
return 0;
}
-static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
+static int _mv88e6xxx_stu_loadpurge(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_vtu_stu_entry *entry)
{
u16 reg = 0;
int ret;
- ret = _mv88e6xxx_vtu_wait(ds);
+ ret = _mv88e6xxx_vtu_wait(ps);
if (ret < 0)
return ret;
@@ -1490,32 +1776,41 @@ static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
goto loadpurge;
/* Write port states */
- ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
+ ret = mv88e6xxx_stu_data_write(ps, entry);
if (ret < 0)
return ret;
reg = GLOBAL_VTU_VID_VALID;
loadpurge:
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_VID, reg);
if (ret < 0)
return ret;
reg = entry->sid & GLOBAL_VTU_SID_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_VTU_SID, reg);
if (ret < 0)
return ret;
- return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
+ return _mv88e6xxx_vtu_cmd(ps, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}
-static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
- u16 *old)
+static int _mv88e6xxx_port_fid(struct mv88e6xxx_priv_state *ps, int port,
+ u16 *new, u16 *old)
{
+ struct dsa_switch *ds = ps->ds;
+ u16 upper_mask;
u16 fid;
int ret;
+ if (mv88e6xxx_num_databases(ps) == 4096)
+ upper_mask = 0xff;
+ else if (mv88e6xxx_num_databases(ps) == 256)
+ upper_mask = 0xf;
+ else
+ return -EOPNOTSUPP;
+
/* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_BASE_VLAN);
if (ret < 0)
return ret;
@@ -1525,24 +1820,24 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
ret &= ~PORT_BASE_VLAN_FID_3_0_MASK;
ret |= (*new << 12) & PORT_BASE_VLAN_FID_3_0_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_BASE_VLAN,
ret);
if (ret < 0)
return ret;
}
/* Port's default FID bits 11:4 are located in reg 0x05, offset 0 */
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_1);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_1);
if (ret < 0)
return ret;
- fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4;
+ fid |= (ret & upper_mask) << 4;
if (new) {
- ret &= ~PORT_CONTROL_1_FID_11_4_MASK;
- ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK;
+ ret &= ~upper_mask;
+ ret |= (*new >> 4) & upper_mask;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1,
ret);
if (ret < 0)
return ret;
@@ -1556,19 +1851,20 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new,
return 0;
}
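
_mv88e6xxx_port_fid() above splices a port's default FID out of two registers: bits 3:0 come from Port Base VLAN bits 15:12, and the upper bits from the low bits of Port Control 1, with the upper width depending on how many databases the chip has. A plain-C sketch of the join:

#include <assert.h>
#include <stdint.h>

/* FID[3:0] live in Port Base VLAN bits 15:12; FID[11:4] (only FID[7:4]
 * on 256-database chips) live in the low bits of Port Control 1.
 */
static uint16_t port_fid_join(uint16_t base_vlan, uint16_t control1,
			      uint16_t upper_mask)
{
	uint16_t fid = (base_vlan & 0xf000) >> 12;

	return fid | ((control1 & upper_mask) << 4);
}

int main(void)
{
	/* FID 0xabc on a 4096-database chip (upper mask 0xff). */
	assert(port_fid_join(0xc000, 0x00ab, 0xff) == 0xabc);
	return 0;
}
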
-static int _mv88e6xxx_port_fid_get(struct dsa_switch *ds, int port, u16 *fid)
+static int _mv88e6xxx_port_fid_get(struct mv88e6xxx_priv_state *ps,
+ int port, u16 *fid)
{
- return _mv88e6xxx_port_fid(ds, port, NULL, fid);
+ return _mv88e6xxx_port_fid(ps, port, NULL, fid);
}
-static int _mv88e6xxx_port_fid_set(struct dsa_switch *ds, int port, u16 fid)
+static int _mv88e6xxx_port_fid_set(struct mv88e6xxx_priv_state *ps,
+ int port, u16 fid)
{
- return _mv88e6xxx_port_fid(ds, port, &fid, NULL);
+ return _mv88e6xxx_port_fid(ps, port, &fid, NULL);
}
-static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
+static int _mv88e6xxx_fid_new(struct mv88e6xxx_priv_state *ps, u16 *fid)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
DECLARE_BITMAP(fid_bitmap, MV88E6XXX_N_FID);
struct mv88e6xxx_vtu_stu_entry vlan;
int i, err;
@@ -1576,8 +1872,8 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
bitmap_zero(fid_bitmap, MV88E6XXX_N_FID);
/* Set every FID bit used by the (un)bridged ports */
- for (i = 0; i < ps->num_ports; ++i) {
- err = _mv88e6xxx_port_fid_get(ds, i, fid);
+ for (i = 0; i < ps->info->num_ports; ++i) {
+ err = _mv88e6xxx_port_fid_get(ps, i, fid);
if (err)
return err;
@@ -1585,12 +1881,12 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
}
/* Set every FID bit used by the VLAN entries */
- err = _mv88e6xxx_vtu_vid_write(ds, GLOBAL_VTU_VID_MASK);
+ err = _mv88e6xxx_vtu_vid_write(ps, GLOBAL_VTU_VID_MASK);
if (err)
return err;
do {
- err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ err = _mv88e6xxx_vtu_getnext(ps, &vlan);
if (err)
return err;
@@ -1604,35 +1900,35 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid)
 * databases are not needed. Return the next available positive FID.
*/
*fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1);
- if (unlikely(*fid == MV88E6XXX_N_FID))
+ if (unlikely(*fid >= mv88e6xxx_num_databases(ps)))
return -ENOSPC;
/* Clear the database */
- return _mv88e6xxx_atu_flush(ds, *fid, true);
+ return _mv88e6xxx_atu_flush(ps, *fid, true);
}
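
_mv88e6xxx_fid_new() above is a first-fit allocator: it marks every FID used by a port or VTU entry in a bitmap, then takes the lowest clear bit above zero. A minimal sketch of the same policy, with a plain bool array standing in for the kernel's DECLARE_BITMAP()/find_next_zero_bit() helpers:

#include <assert.h>
#include <stdbool.h>

#define N_FID 4096

/* First-fit: skip FID 0 (the port-private default database) and return
 * the lowest unused database number, or -1 when all are taken.
 */
static int fid_new(const bool used[N_FID])
{
	int i;

	for (i = 1; i < N_FID; i++)
		if (!used[i])
			return i;
	return -1;
}

int main(void)
{
	static bool used[N_FID];

	used[1] = used[2] = true;
	assert(fid_new(used) == 3);
	return 0;
}
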
-static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_new(struct mv88e6xxx_priv_state *ps, u16 vid,
struct mv88e6xxx_vtu_stu_entry *entry)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct dsa_switch *ds = ps->ds;
struct mv88e6xxx_vtu_stu_entry vlan = {
.valid = true,
.vid = vid,
};
int i, err;
- err = _mv88e6xxx_fid_new(ds, &vlan.fid);
+ err = _mv88e6xxx_fid_new(ps, &vlan.fid);
if (err)
return err;
/* exclude all ports except the CPU and DSA ports */
- for (i = 0; i < ps->num_ports; ++i)
+ for (i = 0; i < ps->info->num_ports; ++i)
vlan.data[i] = dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)
? GLOBAL_VTU_DATA_MEMBER_TAG_UNMODIFIED
: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
- if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
- mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
+ if (mv88e6xxx_6097_family(ps) || mv88e6xxx_6165_family(ps) ||
+ mv88e6xxx_6351_family(ps) || mv88e6xxx_6352_family(ps)) {
struct mv88e6xxx_vtu_stu_entry vstp;
/* Adding a VTU entry requires a valid STU entry. As VSTP is not
@@ -1640,7 +1936,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
 * entries. Thus, validate SID 0.
*/
vlan.sid = 0;
- err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
+ err = _mv88e6xxx_stu_getnext(ps, GLOBAL_VTU_SID_MASK, &vstp);
if (err)
return err;
@@ -1649,7 +1945,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
vstp.valid = true;
vstp.sid = vlan.sid;
- err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
+ err = _mv88e6xxx_stu_loadpurge(ps, &vstp);
if (err)
return err;
}
@@ -1659,7 +1955,7 @@ static int _mv88e6xxx_vtu_new(struct dsa_switch *ds, u16 vid,
return 0;
}
-static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
+static int _mv88e6xxx_vtu_get(struct mv88e6xxx_priv_state *ps, u16 vid,
struct mv88e6xxx_vtu_stu_entry *entry, bool creat)
{
int err;
@@ -1667,11 +1963,11 @@ static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
if (!vid)
return -EINVAL;
- err = _mv88e6xxx_vtu_vid_write(ds, vid - 1);
+ err = _mv88e6xxx_vtu_vid_write(ps, vid - 1);
if (err)
return err;
- err = _mv88e6xxx_vtu_getnext(ds, entry);
+ err = _mv88e6xxx_vtu_getnext(ps, entry);
if (err)
return err;
@@ -1682,7 +1978,7 @@ static int _mv88e6xxx_vtu_get(struct dsa_switch *ds, u16 vid,
 * -EOPNOTSUPP to inform the bridge about a possible software VLAN.
*/
- err = _mv88e6xxx_vtu_new(ds, vid, entry);
+ err = _mv88e6xxx_vtu_new(ps, vid, entry);
}
return err;
@@ -1700,12 +1996,12 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_vtu_vid_write(ds, vid_begin - 1);
+ err = _mv88e6xxx_vtu_vid_write(ps, vid_begin - 1);
if (err)
goto unlock;
do {
- err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ err = _mv88e6xxx_vtu_getnext(ps, &vlan);
if (err)
goto unlock;
@@ -1715,7 +2011,7 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
if (vlan.vid > vid_end)
break;
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
if (dsa_is_dsa_port(ds, i) || dsa_is_cpu_port(ds, i))
continue;
@@ -1749,17 +2045,20 @@ static const char * const mv88e6xxx_port_8021q_mode_names[] = {
[PORT_CONTROL_2_8021Q_SECURE] = "Secure",
};
-int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering)
+static int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u16 old, new = vlan_filtering ? PORT_CONTROL_2_8021Q_SECURE :
PORT_CONTROL_2_8021Q_DISABLED;
int ret;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL_2);
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_CONTROL_2);
if (ret < 0)
goto unlock;
@@ -1769,7 +2068,7 @@ int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
ret &= ~PORT_CONTROL_2_8021Q_MASK;
ret |= new & PORT_CONTROL_2_8021Q_MASK;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_2,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_2,
ret);
if (ret < 0)
goto unlock;
@@ -1786,12 +2085,16 @@ unlock:
return ret;
}
-int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int err;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
/* If the requested port doesn't belong to the same bridge as the VLAN
 * members, do not support it (yet) and fall back to software VLAN.
*/
@@ -1806,13 +2109,13 @@ int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
return 0;
}
-static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
- bool untagged)
+static int _mv88e6xxx_port_vlan_add(struct mv88e6xxx_priv_state *ps, int port,
+ u16 vid, bool untagged)
{
struct mv88e6xxx_vtu_stu_entry vlan;
int err;
- err = _mv88e6xxx_vtu_get(ds, vid, &vlan, true);
+ err = _mv88e6xxx_vtu_get(ps, vid, &vlan, true);
if (err)
return err;
@@ -1820,43 +2123,43 @@ static int _mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
- return _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+ return _mv88e6xxx_vtu_loadpurge(ps, &vlan);
}
-int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans)
+static void mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct switchdev_trans *trans)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
u16 vid;
- int err = 0;
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ return;
mutex_lock(&ps->smi_mutex);
- for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = _mv88e6xxx_port_vlan_add(ds, port, vid, untagged);
- if (err)
- goto unlock;
- }
+ for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid)
+ if (_mv88e6xxx_port_vlan_add(ps, port, vid, untagged))
+ netdev_err(ds->ports[port], "failed to add VLAN %d%c\n",
+ vid, untagged ? 'u' : 't');
- /* no PVID with ranges, otherwise it's a bug */
- if (pvid)
- err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
-unlock:
- mutex_unlock(&ps->smi_mutex);
+ if (pvid && _mv88e6xxx_port_pvid_set(ps, port, vlan->vid_end))
+ netdev_err(ds->ports[port], "failed to set PVID %d\n",
+ vlan->vid_end);
- return err;
+ mutex_unlock(&ps->smi_mutex);
}
-static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+static int _mv88e6xxx_port_vlan_del(struct mv88e6xxx_priv_state *ps,
+ int port, u16 vid)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct dsa_switch *ds = ps->ds;
struct mv88e6xxx_vtu_stu_entry vlan;
int i, err;
- err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
+ err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
if (err)
return err;
@@ -1868,7 +2171,7 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
/* keep the VLAN unless all ports are excluded */
vlan.valid = false;
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
continue;
@@ -1878,33 +2181,36 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
}
}
- err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
+ err = _mv88e6xxx_vtu_loadpurge(ps, &vlan);
if (err)
return err;
- return _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
+ return _mv88e6xxx_atu_remove(ps, vlan.fid, port, false);
}
-int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan)
+static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
u16 pvid, vid;
int err = 0;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VTU))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_port_pvid_get(ds, port, &pvid);
+ err = _mv88e6xxx_port_pvid_get(ps, port, &pvid);
if (err)
goto unlock;
for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
- err = _mv88e6xxx_port_vlan_del(ds, port, vid);
+ err = _mv88e6xxx_port_vlan_del(ps, port, vid);
if (err)
goto unlock;
if (vid == pvid) {
- err = _mv88e6xxx_port_pvid_set(ds, port, 0);
+ err = _mv88e6xxx_port_pvid_set(ps, port, 0);
if (err)
goto unlock;
}
@@ -1916,14 +2222,14 @@ unlock:
return err;
}
-static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_mac_write(struct mv88e6xxx_priv_state *ps,
const unsigned char *addr)
{
int i, ret;
for (i = 0; i < 3; i++) {
ret = _mv88e6xxx_reg_write(
- ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
+ ps, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
(addr[i * 2] << 8) | addr[i * 2 + 1]);
if (ret < 0)
return ret;
@@ -1932,12 +2238,13 @@ static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
return 0;
}
-static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
+static int _mv88e6xxx_atu_mac_read(struct mv88e6xxx_priv_state *ps,
+ unsigned char *addr)
{
int i, ret;
for (i = 0; i < 3; i++) {
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL,
GLOBAL_ATU_MAC_01 + i);
if (ret < 0)
return ret;
@@ -1948,31 +2255,27 @@ static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
return 0;
}
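
_mv88e6xxx_atu_mac_write() and _mv88e6xxx_atu_mac_read() above marshal the six MAC bytes through three 16-bit registers, most significant byte first. A standalone round-trip sketch of that packing:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* addr[0] is the most significant byte, as on the wire. */
static void atu_mac_pack(const uint8_t addr[6], uint16_t regs[3])
{
	int i;

	for (i = 0; i < 3; i++)
		regs[i] = (uint16_t)(addr[i * 2] << 8) | addr[i * 2 + 1];
}

static void atu_mac_unpack(const uint16_t regs[3], uint8_t addr[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		addr[i * 2] = regs[i] >> 8;
		addr[i * 2 + 1] = regs[i] & 0xff;
	}
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x2e };
	uint16_t regs[3];
	uint8_t out[6];

	atu_mac_pack(mac, regs);
	atu_mac_unpack(regs, out);
	assert(memcmp(mac, out, 6) == 0);
	return 0;
}
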
-static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
+static int _mv88e6xxx_atu_load(struct mv88e6xxx_priv_state *ps,
struct mv88e6xxx_atu_entry *entry)
{
int ret;
- ret = _mv88e6xxx_atu_wait(ds);
+ ret = _mv88e6xxx_atu_wait(ps);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
+ ret = _mv88e6xxx_atu_mac_write(ps, entry->mac);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_data_write(ds, entry);
+ ret = _mv88e6xxx_atu_data_write(ps, entry);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
- if (ret < 0)
- return ret;
-
- return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
+ return _mv88e6xxx_atu_cmd(ps, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
}
-static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
+static int _mv88e6xxx_port_fdb_load(struct mv88e6xxx_priv_state *ps, int port,
const unsigned char *addr, u16 vid,
u8 state)
{
@@ -1982,9 +2285,9 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
/* Null VLAN ID corresponds to the port private database */
if (vid == 0)
- err = _mv88e6xxx_port_fid_get(ds, port, &vlan.fid);
+ err = _mv88e6xxx_port_fid_get(ps, port, &vlan.fid);
else
- err = _mv88e6xxx_vtu_get(ds, vid, &vlan, false);
+ err = _mv88e6xxx_vtu_get(ps, vid, &vlan, false);
if (err)
return err;
@@ -1996,51 +2299,60 @@ static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
entry.portv_trunkid = BIT(port);
}
- return _mv88e6xxx_atu_load(ds, &entry);
+ return _mv88e6xxx_atu_load(ps, &entry);
}
-int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+static int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
{
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ return -EOPNOTSUPP;
+
/* We don't need any dynamic resource from the kernel (yet),
* so skip the prepare phase.
*/
return 0;
}
-int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans)
+static void mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb,
+ struct switchdev_trans *trans)
{
int state = is_multicast_ether_addr(fdb->addr) ?
GLOBAL_ATU_DATA_STATE_MC_STATIC :
GLOBAL_ATU_DATA_STATE_UC_STATIC;
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ return;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
+ if (_mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid, state))
+ netdev_err(ds->ports[port], "failed to load MAC address\n");
mutex_unlock(&ps->smi_mutex);
-
- return ret;
}
-int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb)
+static int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_fdb *fdb)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int ret;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
+ ret = _mv88e6xxx_port_fdb_load(ps, port, fdb->addr, fdb->vid,
GLOBAL_ATU_DATA_STATE_UNUSED);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
+static int _mv88e6xxx_atu_getnext(struct mv88e6xxx_priv_state *ps, u16 fid,
struct mv88e6xxx_atu_entry *entry)
{
struct mv88e6xxx_atu_entry next = { 0 };
@@ -2048,23 +2360,19 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
next.fid = fid;
- ret = _mv88e6xxx_atu_wait(ds);
+ ret = _mv88e6xxx_atu_wait(ps);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
+ ret = _mv88e6xxx_atu_cmd(ps, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
+ ret = _mv88e6xxx_atu_mac_read(ps, next.mac);
if (ret < 0)
return ret;
- ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
- if (ret < 0)
- return ret;
-
- ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, GLOBAL_ATU_DATA);
if (ret < 0)
return ret;
@@ -2089,8 +2397,8 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
return 0;
}
-static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
- int port,
+static int _mv88e6xxx_port_fdb_dump_one(struct mv88e6xxx_priv_state *ps,
+ u16 fid, u16 vid, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj))
{
@@ -2099,12 +2407,12 @@ static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
};
int err;
- err = _mv88e6xxx_atu_mac_write(ds, addr.mac);
+ err = _mv88e6xxx_atu_mac_write(ps, addr.mac);
if (err)
return err;
do {
- err = _mv88e6xxx_atu_getnext(ds, fid, &addr);
+ err = _mv88e6xxx_atu_getnext(ps, fid, &addr);
if (err)
break;
@@ -2130,9 +2438,9 @@ static int _mv88e6xxx_port_fdb_dump_one(struct dsa_switch *ds, u16 fid, u16 vid,
return err;
}
-int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj))
+static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
+ struct switchdev_obj_port_fdb *fdb,
+ int (*cb)(struct switchdev_obj *obj))
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct mv88e6xxx_vtu_stu_entry vlan = {
@@ -2141,31 +2449,34 @@ int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
u16 fid;
int err;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_ATU))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
/* Dump port's default Filtering Information Database (VLAN ID 0) */
- err = _mv88e6xxx_port_fid_get(ds, port, &fid);
+ err = _mv88e6xxx_port_fid_get(ps, port, &fid);
if (err)
goto unlock;
- err = _mv88e6xxx_port_fdb_dump_one(ds, fid, 0, port, fdb, cb);
+ err = _mv88e6xxx_port_fdb_dump_one(ps, fid, 0, port, fdb, cb);
if (err)
goto unlock;
/* Dump VLANs' Filtering Information Databases */
- err = _mv88e6xxx_vtu_vid_write(ds, vlan.vid);
+ err = _mv88e6xxx_vtu_vid_write(ps, vlan.vid);
if (err)
goto unlock;
do {
- err = _mv88e6xxx_vtu_getnext(ds, &vlan);
+ err = _mv88e6xxx_vtu_getnext(ps, &vlan);
if (err)
break;
if (!vlan.valid)
break;
- err = _mv88e6xxx_port_fdb_dump_one(ds, vlan.fid, vlan.vid, port,
+ err = _mv88e6xxx_port_fdb_dump_one(ps, vlan.fid, vlan.vid, port,
fdb, cb);
if (err)
break;
@@ -2177,20 +2488,23 @@ unlock:
return err;
}
-int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
int i, err = 0;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
+ return -EOPNOTSUPP;
+
mutex_lock(&ps->smi_mutex);
/* Assign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = bridge;
- for (i = 0; i < ps->num_ports; ++i) {
+ for (i = 0; i < ps->info->num_ports; ++i) {
if (ps->ports[i].bridge_dev == bridge) {
- err = _mv88e6xxx_port_based_vlan_map(ds, i);
+ err = _mv88e6xxx_port_based_vlan_map(ps, i);
if (err)
break;
}
@@ -2201,89 +2515,134 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
return err;
}
-void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
+static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
struct net_device *bridge = ps->ports[port].bridge_dev;
int i;
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_VLANTABLE))
+ return;
+
mutex_lock(&ps->smi_mutex);
/* Unassign the bridge and remap each port's VLANTable */
ps->ports[port].bridge_dev = NULL;
- for (i = 0; i < ps->num_ports; ++i)
+ for (i = 0; i < ps->info->num_ports; ++i)
if (i == port || ps->ports[i].bridge_dev == bridge)
- if (_mv88e6xxx_port_based_vlan_map(ds, i))
+ if (_mv88e6xxx_port_based_vlan_map(ps, i))
netdev_warn(ds->ports[i], "failed to remap\n");
mutex_unlock(&ps->smi_mutex);
}
-static void mv88e6xxx_bridge_work(struct work_struct *work)
+static int _mv88e6xxx_phy_page_write(struct mv88e6xxx_priv_state *ps,
+ int port, int page, int reg, int val)
{
- struct mv88e6xxx_priv_state *ps;
- struct dsa_switch *ds;
- int port;
-
- ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
- ds = ((struct dsa_switch *)ps) - 1;
+ int ret;
- mutex_lock(&ps->smi_mutex);
+ ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
+ if (ret < 0)
+ goto restore_page_0;
- for (port = 0; port < ps->num_ports; ++port)
- if (test_and_clear_bit(port, ps->port_state_update_mask) &&
- _mv88e6xxx_port_state(ds, port, ps->ports[port].state))
- netdev_warn(ds->ports[port], "failed to update state to %s\n",
- mv88e6xxx_port_state_names[ps->ports[port].state]);
+ ret = _mv88e6xxx_phy_write_indirect(ps, port, reg, val);
+restore_page_0:
+ _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
- mutex_unlock(&ps->smi_mutex);
+ return ret;
}
-static int _mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
- int reg, int val)
+static int _mv88e6xxx_phy_page_read(struct mv88e6xxx_priv_state *ps,
+ int port, int page, int reg)
{
int ret;
- ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
+ ret = _mv88e6xxx_phy_write_indirect(ps, port, 0x16, page);
if (ret < 0)
goto restore_page_0;
- ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
+ ret = _mv88e6xxx_phy_read_indirect(ps, port, reg);
restore_page_0:
- _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+ _mv88e6xxx_phy_write_indirect(ps, port, 0x16, 0x0);
return ret;
}
-static int _mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page,
- int reg)
+static int mv88e6xxx_switch_reset(struct mv88e6xxx_priv_state *ps)
{
+ bool ppu_active = mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE);
+ u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
+ struct gpio_desc *gpiod = ps->reset;
+ unsigned long timeout;
int ret;
+ int i;
- ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
- if (ret < 0)
- goto restore_page_0;
+ /* Set all ports to the disabled state. */
+ for (i = 0; i < ps->info->num_ports; i++) {
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(i), PORT_CONTROL);
+ if (ret < 0)
+ return ret;
- ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
-restore_page_0:
- _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(i), PORT_CONTROL,
+ ret & 0xfffc);
+ if (ret)
+ return ret;
+ }
+
+ /* Wait for transmit queues to drain. */
+ usleep_range(2000, 4000);
+
+ /* If there is a gpio connected to the reset pin, toggle it */
+ if (gpiod) {
+ gpiod_set_value_cansleep(gpiod, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(gpiod, 0);
+ usleep_range(10000, 20000);
+ }
+
+ /* Reset the switch. Keep the PPU active if requested. The PPU
+ * needs to be active to support indirect phy register access
+ * through global registers 0x18 and 0x19.
+ */
+ if (ppu_active)
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc000);
+ else
+ ret = _mv88e6xxx_reg_write(ps, REG_GLOBAL, 0x04, 0xc400);
+ if (ret)
+ return ret;
+
+ /* Wait up to one second for reset to complete. */
+ timeout = jiffies + 1 * HZ;
+ while (time_before(jiffies, timeout)) {
+ ret = _mv88e6xxx_reg_read(ps, REG_GLOBAL, 0x00);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & is_reset) == is_reset)
+ break;
+ usleep_range(1000, 2000);
+ }
+ if (time_after(jiffies, timeout))
+ ret = -ETIMEDOUT;
+ else
+ ret = 0;
return ret;
}
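
mv88e6xxx_switch_reset() above uses the usual poll-with-deadline idiom: issue the reset command, then re-read the global status register until the expected ready bits (0x8800 with an active PPU, 0xc800 otherwise) are all set or a one-second deadline expires. A hedged userspace sketch of that loop, with a stub in place of the SMI read:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Stub standing in for _mv88e6xxx_reg_read() of the global status
 * register; a real implementation would poll the hardware.
 */
static uint16_t read_status(void)
{
	return 0xc800;	/* pretend initialization is complete */
}

/* Mirror of the jiffies/time_before() loop above, with a coarse
 * one-second wall-clock timeout.
 */
static bool wait_for_bits(uint16_t mask)
{
	time_t deadline = time(NULL) + 1;

	do {
		if ((read_status() & mask) == mask)
			return true;
		/* the driver sleeps 1-2 ms between polls */
	} while (time(NULL) <= deadline);

	return false;
}

int main(void)
{
	assert(wait_for_bits(0xc800));
	return 0;
}
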
-static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
+static int mv88e6xxx_power_on_serdes(struct mv88e6xxx_priv_state *ps)
{
int ret;
- ret = _mv88e6xxx_phy_page_read(ds, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
+ ret = _mv88e6xxx_phy_page_read(ps, REG_FIBER_SERDES, PAGE_FIBER_SERDES,
MII_BMCR);
if (ret < 0)
return ret;
if (ret & BMCR_PDOWN) {
ret &= ~BMCR_PDOWN;
- ret = _mv88e6xxx_phy_page_write(ds, REG_FIBER_SERDES,
+ ret = _mv88e6xxx_phy_page_write(ps, REG_FIBER_SERDES,
PAGE_FIBER_SERDES, MII_BMCR,
ret);
}
@@ -2291,32 +2650,30 @@ static int mv88e6xxx_power_on_serdes(struct dsa_switch *ds)
return ret;
}
-static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
+static int mv88e6xxx_setup_port(struct mv88e6xxx_priv_state *ps, int port)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ struct dsa_switch *ds = ps->ds;
int ret;
u16 reg;
- mutex_lock(&ps->smi_mutex);
-
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
- mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+ mv88e6xxx_6065_family(ps) || mv88e6xxx_6320_family(ps)) {
/* MAC Forcing register: don't force link, speed,
* duplex or flow control state to any particular
* values on physical ports, but force the CPU port
* and all DSA ports to their maximum bandwidth and
* full duplex.
*/
- reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
+ reg = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_PCS_CTRL);
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
reg &= ~PORT_PCS_CTRL_UNFORCED;
reg |= PORT_PCS_CTRL_FORCE_LINK |
PORT_PCS_CTRL_LINK_UP |
PORT_PCS_CTRL_DUPLEX_FULL |
PORT_PCS_CTRL_FORCE_DUPLEX;
- if (mv88e6xxx_6065_family(ds))
+ if (mv88e6xxx_6065_family(ps))
reg |= PORT_PCS_CTRL_100;
else
reg |= PORT_PCS_CTRL_1000;
@@ -2324,10 +2681,10 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg |= PORT_PCS_CTRL_UNFORCED;
}
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_PCS_CTRL, reg);
if (ret)
- goto abort;
+ return ret;
}
/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
@@ -2345,19 +2702,19 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* forwarding of unknown unicasts and multicasts.
*/
reg = 0;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
+ mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps))
reg = PORT_CONTROL_IGMP_MLD_SNOOP |
PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
PORT_CONTROL_STATE_FORWARDING;
if (dsa_is_cpu_port(ds, port)) {
- if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+ if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
reg |= PORT_CONTROL_DSA_TAG;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
else
@@ -2366,20 +2723,20 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6095_family(ps) || mv88e6xxx_6065_family(ps) ||
+ mv88e6xxx_6185_family(ps) || mv88e6xxx_6320_family(ps)) {
if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
reg |= PORT_CONTROL_EGRESS_ADD_TAG;
}
}
if (dsa_is_dsa_port(ds, port)) {
- if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
+ if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps))
reg |= PORT_CONTROL_DSA_TAG;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
reg |= PORT_CONTROL_FRAME_MODE_DSA;
}
@@ -2388,26 +2745,26 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
if (reg) {
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_CONTROL, reg);
if (ret)
- goto abort;
+ return ret;
}
/* If this port is connected to a SerDes, make sure the SerDes is not
* powered down.
*/
- if (mv88e6xxx_6352_family(ds)) {
- ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
+ if (mv88e6xxx_6352_family(ps)) {
+ ret = _mv88e6xxx_reg_read(ps, REG_PORT(port), PORT_STATUS);
if (ret < 0)
- goto abort;
+ return ret;
ret &= PORT_STATUS_CMODE_MASK;
if ((ret == PORT_STATUS_CMODE_100BASE_X) ||
(ret == PORT_STATUS_CMODE_1000BASE_X) ||
(ret == PORT_STATUS_CMODE_SGMII)) {
- ret = mv88e6xxx_power_on_serdes(ds);
+ ret = mv88e6xxx_power_on_serdes(ps);
if (ret < 0)
- goto abort;
+ return ret;
}
}
@@ -2418,16 +2775,17 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
* copy of all transmitted/received frames on this port to the CPU.
*/
reg = 0;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6095_family(ps) || mv88e6xxx_6320_family(ps) ||
+ mv88e6xxx_6185_family(ps))
reg = PORT_CONTROL_2_MAP_DA;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6320_family(ps))
reg |= PORT_CONTROL_2_JUMBO_10240;
- if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
+ if (mv88e6xxx_6095_family(ps) || mv88e6xxx_6185_family(ps)) {
/* Set the upstream port this port should use */
reg |= dsa_upstream_port(ds);
/* enable forwarding of unknown multicast addresses to
@@ -2440,10 +2798,10 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg |= PORT_CONTROL_2_8021Q_DISABLED;
if (reg) {
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_CONTROL_2, reg);
if (ret)
- goto abort;
+ return ret;
}
/* Port Association Vector: when learning source addresses
@@ -2456,300 +2814,344 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
if (dsa_is_cpu_port(ds, port))
reg = 0;
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_ASSOC_VECTOR, reg);
if (ret)
- goto abort;
+ return ret;
/* Egress rate control 2: disable egress rate control. */
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_RATE_CONTROL_2,
0x0000);
if (ret)
- goto abort;
+ return ret;
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
/* Do not limit the period of time that this port can
* be paused for by the remote end or the period of
* time that this port can pause the remote end.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_PAUSE_CTRL, 0x0000);
if (ret)
- goto abort;
+ return ret;
/* Port ATU control: disable limiting the number of
* address database entries that this port is allowed
* to use.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
 PORT_ATU_CONTROL, 0x0000);
+ if (ret)
+ return ret;
/* Priority Override: disable DA, SA and VTU priority
* override.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_PRI_OVERRIDE, 0x0000);
if (ret)
- goto abort;
+ return ret;
/* Port Ethertype: use the Ethertype DSA Ethertype
* value.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_ETH_TYPE, ETH_P_EDSA);
if (ret)
- goto abort;
+ return ret;
/* Tag Remap: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_TAG_REGMAP_0123, 0x3210);
if (ret)
- goto abort;
+ return ret;
/* Tag Remap 2: use an identity 802.1p prio -> switch
* prio mapping.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_TAG_REGMAP_4567, 0x7654);
if (ret)
- goto abort;
+ return ret;
}
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
/* Rate Control: disable ingress rate limiting. */
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port),
PORT_RATE_CONTROL, 0x0001);
if (ret)
- goto abort;
+ return ret;
}
/* Port Control 1: disable trunking, disable sending
* learning messages to this port.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_CONTROL_1, 0x0000);
if (ret)
- goto abort;
+ return ret;
/* Port based VLAN map: give each port the same default address
* database, and allow bidirectional communication between the
* CPU and DSA port(s), and the other ports.
*/
- ret = _mv88e6xxx_port_fid_set(ds, port, 0);
+ ret = _mv88e6xxx_port_fid_set(ps, port, 0);
if (ret)
- goto abort;
+ return ret;
- ret = _mv88e6xxx_port_based_vlan_map(ds, port);
+ ret = _mv88e6xxx_port_based_vlan_map(ps, port);
if (ret)
- goto abort;
+ return ret;
/* Default VLAN ID and priority: don't set a default VLAN
* ID, and set the default packet priority to zero.
*/
- ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
+ ret = _mv88e6xxx_reg_write(ps, REG_PORT(port), PORT_DEFAULT_VLAN,
0x0000);
-abort:
- mutex_unlock(&ps->smi_mutex);
- return ret;
-}
-
-int mv88e6xxx_setup_ports(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
- int i;
+ if (ret)
+ return ret;
- for (i = 0; i < ps->num_ports; i++) {
- ret = mv88e6xxx_setup_port(ds, i);
- if (ret < 0)
- return ret;
- }
return 0;
}
-int mv88e6xxx_setup_common(struct dsa_switch *ds)
+static int mv88e6xxx_setup_global(struct mv88e6xxx_priv_state *ps)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- mutex_init(&ps->smi_mutex);
+ struct dsa_switch *ds = ps->ds;
+ u32 upstream_port = dsa_upstream_port(ds);
+ u16 reg;
+ int err;
+ int i;
- ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
+ /* Enable the PHY Polling Unit if present, don't discard any packets,
+ * and mask all interrupt sources.
+ */
+ reg = 0;
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU) ||
+ mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU_ACTIVE))
+ reg |= GLOBAL_CONTROL_PPU_ENABLE;
- INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL, reg);
+ if (err)
+ return err;
- return 0;
-}
+ /* Configure the upstream port as the port to which ingress,
+ * egress, and ARP monitor frames are to be sent.
+ */
+ reg = upstream_port << GLOBAL_MONITOR_CONTROL_INGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_EGRESS_SHIFT |
+ upstream_port << GLOBAL_MONITOR_CONTROL_ARP_SHIFT;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_MONITOR_CONTROL, reg);
+ if (err)
+ return err;
-int mv88e6xxx_setup_global(struct dsa_switch *ds)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int ret;
- int i;
+ /* Disable remote management, and set the switch's DSA device number. */
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_CONTROL_2,
+ GLOBAL_CONTROL_2_MULTIPLE_CASCADE |
+ (ds->index & 0x1f));
+ if (err)
+ return err;
/* Set the default address aging time to 5 minutes, and
* enable address learn messages to be sent to all message
* ports.
*/
- REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
- 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_ATU_CONTROL,
+ 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
+ if (err)
+ return err;
/* Configure the IP ToS mapping registers. */
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
- REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
+ if (err)
+ return err;
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
+ if (err)
+ return err;
/* Configure the IEEE 802.1p priority mapping register. */
- REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
+ if (err)
+ return err;
/* Send all frames with destination addresses matching
* 01:80:c2:00:00:0x to the CPU port.
*/
- REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
+ if (err)
+ return err;
/* Ignore removed tag data on doubly tagged packets, disable
* flow control messages, force flow control priority to the
* highest, and send all special multicast frames to the CPU
* port at the highest priority.
*/
- REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
- 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
- GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
+ 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
+ GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
+ if (err)
+ return err;
/* Program the DSA routing table. */
for (i = 0; i < 32; i++) {
int nexthop = 0x1f;
- if (ds->pd->rtable &&
- i != ds->index && i < ds->dst->pd->nr_chips)
- nexthop = ds->pd->rtable[i] & 0x1f;
+ if (ps->ds->cd->rtable &&
+ i != ps->ds->index && i < ps->ds->dst->pd->nr_chips)
+ nexthop = ps->ds->cd->rtable[i] & 0x1f;
- REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
- GLOBAL2_DEVICE_MAPPING_UPDATE |
- (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
- nexthop);
+ err = _mv88e6xxx_reg_write(
+ ps, REG_GLOBAL2,
+ GLOBAL2_DEVICE_MAPPING,
+ GLOBAL2_DEVICE_MAPPING_UPDATE |
+ (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) | nexthop);
+ if (err)
+ return err;
}
/* Clear all trunk masks. */
- for (i = 0; i < 8; i++)
- REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
- 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
- ((1 << ps->num_ports) - 1));
+ for (i = 0; i < 8; i++) {
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
+ 0x8000 |
+ (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
+ ((1 << ps->info->num_ports) - 1));
+ if (err)
+ return err;
+ }
/* Clear all trunk mappings. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
- GLOBAL2_TRUNK_MAPPING_UPDATE |
- (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
-
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ for (i = 0; i < 16; i++) {
+ err = _mv88e6xxx_reg_write(
+ ps, REG_GLOBAL2,
+ GLOBAL2_TRUNK_MAPPING,
+ GLOBAL2_TRUNK_MAPPING_UPDATE |
+ (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
+ if (err)
+ return err;
+ }
+
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
/* Send all frames with destination addresses matching
* 01:80:c2:00:00:2x to the CPU port.
*/
- REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ GLOBAL2_MGMT_EN_2X, 0xffff);
+ if (err)
+ return err;
/* Initialise cross-chip port VLAN table to reset
* defaults.
*/
- REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ GLOBAL2_PVT_ADDR, 0x9000);
+ if (err)
+ return err;
/* Clear the priority override table. */
- for (i = 0; i < 16; i++)
- REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
- 0x8000 | (i << 8));
+ for (i = 0; i < 16; i++) {
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ GLOBAL2_PRIO_OVERRIDE,
+ 0x8000 | (i << 8));
+ if (err)
+ return err;
+ }
}
- if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
- mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
- mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
- mv88e6xxx_6320_family(ds)) {
+ if (mv88e6xxx_6352_family(ps) || mv88e6xxx_6351_family(ps) ||
+ mv88e6xxx_6165_family(ps) || mv88e6xxx_6097_family(ps) ||
+ mv88e6xxx_6185_family(ps) || mv88e6xxx_6095_family(ps) ||
+ mv88e6xxx_6320_family(ps)) {
/* Disable ingress rate limiting by resetting all
* ingress rate limit registers to their initial
* state.
*/
- for (i = 0; i < ps->num_ports; i++)
- REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
- 0x9000 | (i << 8));
+ for (i = 0; i < ps->info->num_ports; i++) {
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL2,
+ GLOBAL2_INGRESS_OP,
+ 0x9000 | (i << 8));
+ if (err)
+ return err;
+ }
}
/* Clear the statistics counters for all ports */
- REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
+ err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
+ GLOBAL_STATS_OP_FLUSH_ALL);
+ if (err)
+ return err;
/* Wait for the flush to complete. */
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_stats_wait(ds);
- if (ret < 0)
- goto unlock;
+ err = _mv88e6xxx_stats_wait(ps);
+ if (err)
+ return err;
/* Clear all ATU entries */
- ret = _mv88e6xxx_atu_flush(ds, 0, true);
- if (ret < 0)
- goto unlock;
+ err = _mv88e6xxx_atu_flush(ps, 0, true);
+ if (err)
+ return err;
/* Clear all the VTU and STU entries */
- ret = _mv88e6xxx_vtu_stu_flush(ds);
-unlock:
- mutex_unlock(&ps->smi_mutex);
+ err = _mv88e6xxx_vtu_stu_flush(ps);
- return ret;
+ return err;
}
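
The routing-table loop in mv88e6xxx_setup_global() above packs three fields into each Device Mapping write: an update strobe, the 5-bit target switch index, and the 5-bit next hop (0x1f meaning no route). A sketch of that word, assuming the usual bit-15 strobe and target shift of 8 from mv88e6xxx.h:

#include <assert.h>
#include <stdint.h>

#define DEVMAP_UPDATE		0x8000	/* assumed BIT(15) update strobe */
#define DEVMAP_TARGET_SHIFT	8	/* assumed per mv88e6xxx.h */

/* Route frames for switch 'target' (0-31) via 'nexthop' (0x1f = none). */
static uint16_t devmap_word(unsigned int target, unsigned int nexthop)
{
	return DEVMAP_UPDATE | (target << DEVMAP_TARGET_SHIFT) |
	       (nexthop & 0x1f);
}

int main(void)
{
	assert(devmap_word(3, 0x1f) == 0x831f);
	return 0;
}
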
-int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
+static int mv88e6xxx_setup(struct dsa_switch *ds)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
- struct gpio_desc *gpiod = ds->pd->reset;
- unsigned long timeout;
- int ret;
+ int err;
int i;
- /* Set all ports to the disabled state. */
- for (i = 0; i < ps->num_ports; i++) {
- ret = REG_READ(REG_PORT(i), PORT_CONTROL);
- REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
- }
+ ps->ds = ds;
- /* Wait for transmit queues to drain. */
- usleep_range(2000, 4000);
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM))
+ mutex_init(&ps->eeprom_mutex);
- /* If there is a gpio connected to the reset pin, toggle it */
- if (gpiod) {
- gpiod_set_value_cansleep(gpiod, 1);
- usleep_range(10000, 20000);
- gpiod_set_value_cansleep(gpiod, 0);
- usleep_range(10000, 20000);
- }
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+ mv88e6xxx_ppu_state_init(ps);
- /* Reset the switch. Keep the PPU active if requested. The PPU
- * needs to be active to support indirect phy register access
- * through global registers 0x18 and 0x19.
- */
- if (ppu_active)
- REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
- else
- REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+ mutex_lock(&ps->smi_mutex);
- /* Wait up to one second for reset to complete. */
- timeout = jiffies + 1 * HZ;
- while (time_before(jiffies, timeout)) {
- ret = REG_READ(REG_GLOBAL, 0x00);
- if ((ret & is_reset) == is_reset)
- break;
- usleep_range(1000, 2000);
+ err = mv88e6xxx_switch_reset(ps);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_setup_global(ps);
+ if (err)
+ goto unlock;
+
+ for (i = 0; i < ps->info->num_ports; i++) {
+ err = mv88e6xxx_setup_port(ps, i);
+ if (err)
+ goto unlock;
}
- if (time_after(jiffies, timeout))
- return -ETIMEDOUT;
- return 0;
+unlock:
+ mutex_unlock(&ps->smi_mutex);
+
+ return err;
}
int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
@@ -2758,7 +3160,7 @@ int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_page_read(ds, port, page, reg);
+ ret = _mv88e6xxx_phy_page_read(ps, port, page, reg);
mutex_unlock(&ps->smi_mutex);
return ret;
@@ -2771,82 +3173,61 @@ int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
int ret;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_page_write(ds, port, page, reg, val);
+ ret = _mv88e6xxx_phy_page_write(ps, port, page, reg, val);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
+static int mv88e6xxx_port_to_phy_addr(struct mv88e6xxx_priv_state *ps,
+ int port)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-
- if (port >= 0 && port < ps->num_ports)
+ if (port >= 0 && port < ps->info->num_ports)
return port;
return -EINVAL;
}
-int
-mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
+static int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+ int addr = mv88e6xxx_port_to_phy_addr(ps, port);
int ret;
if (addr < 0)
- return addr;
+ return 0xffff;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_read(ds, addr, regnum);
- mutex_unlock(&ps->smi_mutex);
- return ret;
-}
-
-int
-mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6xxx_port_to_phy_addr(ds, port);
- int ret;
- if (addr < 0)
- return addr;
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+ ret = mv88e6xxx_phy_read_ppu(ps, addr, regnum);
+ else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
+ ret = _mv88e6xxx_phy_read_indirect(ps, addr, regnum);
+ else
+ ret = _mv88e6xxx_phy_read(ps, addr, regnum);
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
mutex_unlock(&ps->smi_mutex);
return ret;
}
-int
-mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
+static int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum,
+ u16 val)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6xxx_port_to_phy_addr(ds, port);
+ int addr = mv88e6xxx_port_to_phy_addr(ps, port);
int ret;
if (addr < 0)
- return addr;
+ return 0xffff;
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
- mutex_unlock(&ps->smi_mutex);
- return ret;
-}
-
-int
-mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
- u16 val)
-{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- int addr = mv88e6xxx_port_to_phy_addr(ds, port);
- int ret;
- if (addr < 0)
- return addr;
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_PPU))
+ ret = mv88e6xxx_phy_write_ppu(ps, addr, regnum, val);
+ else if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_SMI_PHY))
+ ret = _mv88e6xxx_phy_write_indirect(ps, addr, regnum, val);
+ else
+ ret = _mv88e6xxx_phy_write(ps, addr, regnum, val);
- mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
mutex_unlock(&ps->smi_mutex);
return ret;
}
@@ -2863,44 +3244,45 @@ static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
mutex_lock(&ps->smi_mutex);
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
+ ret = _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x6);
if (ret < 0)
goto error;
/* Enable temperature sensor */
- ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ ret = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
if (ret < 0)
goto error;
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
+ ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret | (1 << 5));
if (ret < 0)
goto error;
/* Wait for temperature to stabilize */
usleep_range(10000, 12000);
- val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
+ val = _mv88e6xxx_phy_read(ps, 0x0, 0x1a);
if (val < 0) {
ret = val;
goto error;
}
/* Disable temperature sensor */
- ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
+ ret = _mv88e6xxx_phy_write(ps, 0x0, 0x1a, ret & ~(1 << 5));
if (ret < 0)
goto error;
*temp = ((val & 0x1f) - 5) * 5;
error:
- _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
+ _mv88e6xxx_phy_write(ps, 0x0, 0x16, 0x0);
mutex_unlock(&ps->smi_mutex);
return ret;
}
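The decoding in mv88e61xx_get_temp() is worth spelling out: bits 4:0 of the PHY register hold the raw reading, scaled in 5 degree steps with a 25 degree offset. A minimal worked example of that formula (illustrative only, not part of the patch):

    /* Illustrative sketch: mirrors *temp = ((val & 0x1f) - 5) * 5 above */
    static int mv88e61xx_decode_temp(int val)
    {
            return ((val & 0x1f) - 5) * 5;  /* e.g. 0x0f -> (15 - 5) * 5 = 50 C */
    }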
static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
{
- int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
int ret;
*temp = 0;
@@ -2914,20 +3296,26 @@ static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
return 0;
}
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
{
- if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP))
+ return -EOPNOTSUPP;
+
+ if (mv88e6xxx_6320_family(ps) || mv88e6xxx_6352_family(ps))
return mv88e63xx_get_temp(ds, temp);
return mv88e61xx_get_temp(ds, temp);
}
-int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
{
- int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
int ret;
- if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
*temp = 0;
@@ -2941,12 +3329,13 @@ int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
return 0;
}
-int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
+static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
{
- int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
int ret;
- if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
@@ -2957,12 +3346,13 @@ int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
(ret & 0xe0ff) | (temp << 8));
}
-int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
+static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
{
- int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ int phy = mv88e6xxx_6320_family(ps) ? 3 : 0;
int ret;
- if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
+ if (!mv88e6xxx_has(ps, MV88E6XXX_FLAG_TEMP_LIMIT))
return -EOPNOTSUPP;
*alarm = false;
@@ -2977,70 +3367,354 @@ int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
}
#endif /* CONFIG_NET_DSA_HWMON */
-char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
- const struct mv88e6xxx_switch_id *table,
- unsigned int num)
+static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ [MV88E6085] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6085,
+ .family = MV88E6XXX_FAMILY_6097,
+ .name = "Marvell 88E6085",
+ .num_databases = 4096,
+ .num_ports = 10,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6097,
+ },
+
+ [MV88E6095] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6095,
+ .family = MV88E6XXX_FAMILY_6095,
+ .name = "Marvell 88E6095/88E6095F",
+ .num_databases = 256,
+ .num_ports = 11,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6095,
+ },
+
+ [MV88E6123] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6123,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6123",
+ .num_databases = 4096,
+ .num_ports = 3,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ },
+
+ [MV88E6131] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6131,
+ .family = MV88E6XXX_FAMILY_6185,
+ .name = "Marvell 88E6131",
+ .num_databases = 256,
+ .num_ports = 8,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ },
+
+ [MV88E6161] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6161,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6161",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ },
+
+ [MV88E6165] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6165,
+ .family = MV88E6XXX_FAMILY_6165,
+ .name = "Marvell 88E6165",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6165,
+ },
+
+ [MV88E6171] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6171,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6171",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6172] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6172,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6172",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+
+ [MV88E6175] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6175,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6175",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6176] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6176,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6176",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+
+ [MV88E6185] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6185,
+ .family = MV88E6XXX_FAMILY_6185,
+ .name = "Marvell 88E6185",
+ .num_databases = 256,
+ .num_ports = 10,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6185,
+ },
+
+ [MV88E6240] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6240,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6240",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+
+ [MV88E6320] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6320,
+ .family = MV88E6XXX_FAMILY_6320,
+ .name = "Marvell 88E6320",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ },
+
+ [MV88E6321] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6321,
+ .family = MV88E6XXX_FAMILY_6320,
+ .name = "Marvell 88E6321",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6320,
+ },
+
+ [MV88E6350] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6350",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6351] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6351,
+ .family = MV88E6XXX_FAMILY_6351,
+ .name = "Marvell 88E6351",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6351,
+ },
+
+ [MV88E6352] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6352,
+ .family = MV88E6XXX_FAMILY_6352,
+ .name = "Marvell 88E6352",
+ .num_databases = 4096,
+ .num_ports = 7,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6352,
+ },
+};
+
+static const struct mv88e6xxx_info *
+mv88e6xxx_lookup_info(unsigned int prod_num, const struct mv88e6xxx_info *table,
+ unsigned int num)
{
- struct mii_bus *bus = dsa_host_dev_to_mii_bus(host_dev);
- int i, ret;
+ int i;
+
+ for (i = 0; i < num; ++i)
+ if (table[i].prod_num == prod_num)
+ return &table[i];
+
+ return NULL;
+}
+
+static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
+ struct device *host_dev, int sw_addr,
+ void **priv)
+{
+ const struct mv88e6xxx_info *info;
+ struct mv88e6xxx_priv_state *ps;
+ struct mii_bus *bus;
+ const char *name;
+ int id, prod_num, rev;
+
+ bus = dsa_host_dev_to_mii_bus(host_dev);
if (!bus)
return NULL;
- ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
- if (ret < 0)
+ id = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), PORT_SWITCH_ID);
+ if (id < 0)
return NULL;
- /* Look up the exact switch ID */
- for (i = 0; i < num; ++i)
- if (table[i].id == ret)
- return table[i].name;
-
- /* Look up only the product number */
- for (i = 0; i < num; ++i) {
- if (table[i].id == (ret & PORT_SWITCH_ID_PROD_NUM_MASK)) {
- dev_warn(host_dev, "unknown revision %d, using base switch 0x%x\n",
- ret & PORT_SWITCH_ID_REV_MASK,
- ret & PORT_SWITCH_ID_PROD_NUM_MASK);
- return table[i].name;
+ prod_num = (id & 0xfff0) >> 4;
+ rev = id & 0x000f;
+
+ info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
+ ARRAY_SIZE(mv88e6xxx_table));
+ if (!info)
+ return NULL;
+
+ name = info->name;
+
+ ps = devm_kzalloc(dsa_dev, sizeof(*ps), GFP_KERNEL);
+ if (!ps)
+ return NULL;
+
+ ps->bus = bus;
+ ps->sw_addr = sw_addr;
+ ps->info = info;
+ mutex_init(&ps->smi_mutex);
+
+ *priv = ps;
+
+ dev_info(&ps->bus->dev, "switch 0x%x probed: %s, revision %u\n",
+ prod_num, name, rev);
+
+ return name;
+}
+
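For orientation, the ID decoding above splits the 16-bit PORT_SWITCH_ID value into a 12-bit product number and a 4-bit revision. A worked example using an assumed raw value for the 88E6352 (illustrative only):

    int id = 0x3521;                        /* hypothetical raw PORT_SWITCH_ID */
    int prod_num = (id & 0xfff0) >> 4;      /* 0x352 == PORT_SWITCH_ID_PROD_NUM_6352 */
    int rev = id & 0x000f;                  /* revision 1 */
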
+struct dsa_switch_driver mv88e6xxx_switch_driver = {
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .probe = mv88e6xxx_drv_probe,
+ .setup = mv88e6xxx_setup,
+ .set_addr = mv88e6xxx_set_addr,
+ .phy_read = mv88e6xxx_phy_read,
+ .phy_write = mv88e6xxx_phy_write,
+ .adjust_link = mv88e6xxx_adjust_link,
+ .get_strings = mv88e6xxx_get_strings,
+ .get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
+ .get_sset_count = mv88e6xxx_get_sset_count,
+ .set_eee = mv88e6xxx_set_eee,
+ .get_eee = mv88e6xxx_get_eee,
+#ifdef CONFIG_NET_DSA_HWMON
+ .get_temp = mv88e6xxx_get_temp,
+ .get_temp_limit = mv88e6xxx_get_temp_limit,
+ .set_temp_limit = mv88e6xxx_set_temp_limit,
+ .get_temp_alarm = mv88e6xxx_get_temp_alarm,
+#endif
+ .get_eeprom_len = mv88e6xxx_get_eeprom_len,
+ .get_eeprom = mv88e6xxx_get_eeprom,
+ .set_eeprom = mv88e6xxx_set_eeprom,
+ .get_regs_len = mv88e6xxx_get_regs_len,
+ .get_regs = mv88e6xxx_get_regs,
+ .port_bridge_join = mv88e6xxx_port_bridge_join,
+ .port_bridge_leave = mv88e6xxx_port_bridge_leave,
+ .port_stp_state_set = mv88e6xxx_port_stp_state_set,
+ .port_vlan_filtering = mv88e6xxx_port_vlan_filtering,
+ .port_vlan_prepare = mv88e6xxx_port_vlan_prepare,
+ .port_vlan_add = mv88e6xxx_port_vlan_add,
+ .port_vlan_del = mv88e6xxx_port_vlan_del,
+ .port_vlan_dump = mv88e6xxx_port_vlan_dump,
+ .port_fdb_prepare = mv88e6xxx_port_fdb_prepare,
+ .port_fdb_add = mv88e6xxx_port_fdb_add,
+ .port_fdb_del = mv88e6xxx_port_fdb_del,
+ .port_fdb_dump = mv88e6xxx_port_fdb_dump,
+};
+
+int mv88e6xxx_probe(struct mdio_device *mdiodev)
+{
+ struct device *dev = &mdiodev->dev;
+ struct device_node *np = dev->of_node;
+ struct mv88e6xxx_priv_state *ps;
+ int id, prod_num, rev;
+ struct dsa_switch *ds;
+ u32 eeprom_len;
+ int err;
+
+ ds = devm_kzalloc(dev, sizeof(*ds) + sizeof(*ps), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ ps = (struct mv88e6xxx_priv_state *)(ds + 1);
+ ds->priv = ps;
+ ds->dev = dev;
+ ps->dev = dev;
+ ps->ds = ds;
+ ps->bus = mdiodev->bus;
+ ps->sw_addr = mdiodev->addr;
+ mutex_init(&ps->smi_mutex);
+
+ get_device(&ps->bus->dev);
+
+ ds->drv = &mv88e6xxx_switch_driver;
+
+ id = mv88e6xxx_reg_read(ps, REG_PORT(0), PORT_SWITCH_ID);
+ if (id < 0)
+ return id;
+
+ prod_num = (id & 0xfff0) >> 4;
+ rev = id & 0x000f;
+
+ ps->info = mv88e6xxx_lookup_info(prod_num, mv88e6xxx_table,
+ ARRAY_SIZE(mv88e6xxx_table));
+ if (!ps->info)
+ return -ENODEV;
+
+ ps->reset = devm_gpiod_get(&mdiodev->dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(ps->reset)) {
+ err = PTR_ERR(ps->reset);
+ if (err == -ENOENT) {
+ /* Optional, so not an error */
+ ps->reset = NULL;
+ } else {
+ return err;
}
}
- return NULL;
+ if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM) &&
+ !of_property_read_u32(np, "eeprom-length", &eeprom_len))
+ ps->eeprom_len = eeprom_len;
+
+ dev_set_drvdata(dev, ds);
+
+ dev_info(dev, "switch 0x%x probed: %s, revision %u\n",
+ prod_num, ps->info->name, rev);
+
+ return 0;
}
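Note the allocation pattern in mv88e6xxx_probe(): the dsa_switch and the private state come from a single devm_kzalloc(), with ps placed immediately after the dsa_switch by pointer arithmetic. A minimal sketch of the same idiom, with hypothetical types (it assumes the trailing struct needs no stricter alignment than the end of the leading one provides):

    struct outer { long a; };
    struct inner { long b; };

    struct outer *o = devm_kzalloc(dev, sizeof(*o) + sizeof(struct inner),
                                   GFP_KERNEL);
    struct inner *i = (struct inner *)(o + 1);  /* starts right past *o */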
+static void mv88e6xxx_remove(struct mdio_device *mdiodev)
+{
+ struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
+ struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+
+ put_device(&ps->bus->dev);
+}
+
+static const struct of_device_id mv88e6xxx_of_match[] = {
+ { .compatible = "marvell,mv88e6085" },
+ { /* sentinel */ },
+};
+
+MODULE_DEVICE_TABLE(of, mv88e6xxx_of_match);
+
+static struct mdio_driver mv88e6xxx_driver = {
+ .probe = mv88e6xxx_probe,
+ .remove = mv88e6xxx_remove,
+ .mdiodrv.driver = {
+ .name = "mv88e6085",
+ .of_match_table = mv88e6xxx_of_match,
+ },
+};
+
static int __init mv88e6xxx_init(void)
{
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
- register_switch_driver(&mv88e6131_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
- register_switch_driver(&mv88e6123_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
- register_switch_driver(&mv88e6352_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
- register_switch_driver(&mv88e6171_switch_driver);
-#endif
- return 0;
+ register_switch_driver(&mv88e6xxx_switch_driver);
+ return mdio_driver_register(&mv88e6xxx_driver);
}
module_init(mv88e6xxx_init);
static void __exit mv88e6xxx_cleanup(void)
{
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
- unregister_switch_driver(&mv88e6171_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
- unregister_switch_driver(&mv88e6352_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123)
- unregister_switch_driver(&mv88e6123_switch_driver);
-#endif
-#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
- unregister_switch_driver(&mv88e6131_switch_driver);
-#endif
+ mdio_driver_unregister(&mv88e6xxx_driver);
+ unregister_switch_driver(&mv88e6xxx_switch_driver);
}
module_exit(mv88e6xxx_cleanup);
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
index 26a424acd..36d0e1504 100644
--- a/drivers/net/dsa/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -12,6 +12,7 @@
#define __MV88E6XXX_H
#include <linux/if_vlan.h>
+#include <linux/gpio/consumer.h>
#ifndef UINT64_MAX
#define UINT64_MAX (u64)(~((u64)0))
@@ -68,52 +69,23 @@
#define PORT_PCS_CTRL_UNFORCED 0x03
#define PORT_PAUSE_CTRL 0x02
#define PORT_SWITCH_ID 0x03
-#define PORT_SWITCH_ID_PROD_NUM_MASK 0xfff0
-#define PORT_SWITCH_ID_REV_MASK 0x000f
-#define PORT_SWITCH_ID_6031 0x0310
-#define PORT_SWITCH_ID_6035 0x0350
-#define PORT_SWITCH_ID_6046 0x0480
-#define PORT_SWITCH_ID_6061 0x0610
-#define PORT_SWITCH_ID_6065 0x0650
-#define PORT_SWITCH_ID_6085 0x04a0
-#define PORT_SWITCH_ID_6092 0x0970
-#define PORT_SWITCH_ID_6095 0x0950
-#define PORT_SWITCH_ID_6096 0x0980
-#define PORT_SWITCH_ID_6097 0x0990
-#define PORT_SWITCH_ID_6108 0x1070
-#define PORT_SWITCH_ID_6121 0x1040
-#define PORT_SWITCH_ID_6122 0x1050
-#define PORT_SWITCH_ID_6123 0x1210
-#define PORT_SWITCH_ID_6123_A1 0x1212
-#define PORT_SWITCH_ID_6123_A2 0x1213
-#define PORT_SWITCH_ID_6131 0x1060
-#define PORT_SWITCH_ID_6131_B2 0x1066
-#define PORT_SWITCH_ID_6152 0x1a40
-#define PORT_SWITCH_ID_6155 0x1a50
-#define PORT_SWITCH_ID_6161 0x1610
-#define PORT_SWITCH_ID_6161_A1 0x1612
-#define PORT_SWITCH_ID_6161_A2 0x1613
-#define PORT_SWITCH_ID_6165 0x1650
-#define PORT_SWITCH_ID_6165_A1 0x1652
-#define PORT_SWITCH_ID_6165_A2 0x1653
-#define PORT_SWITCH_ID_6171 0x1710
-#define PORT_SWITCH_ID_6172 0x1720
-#define PORT_SWITCH_ID_6175 0x1750
-#define PORT_SWITCH_ID_6176 0x1760
-#define PORT_SWITCH_ID_6182 0x1a60
-#define PORT_SWITCH_ID_6185 0x1a70
-#define PORT_SWITCH_ID_6240 0x2400
-#define PORT_SWITCH_ID_6320 0x1150
-#define PORT_SWITCH_ID_6320_A1 0x1151
-#define PORT_SWITCH_ID_6320_A2 0x1152
-#define PORT_SWITCH_ID_6321 0x3100
-#define PORT_SWITCH_ID_6321_A1 0x3101
-#define PORT_SWITCH_ID_6321_A2 0x3102
-#define PORT_SWITCH_ID_6350 0x3710
-#define PORT_SWITCH_ID_6351 0x3750
-#define PORT_SWITCH_ID_6352 0x3520
-#define PORT_SWITCH_ID_6352_A0 0x3521
-#define PORT_SWITCH_ID_6352_A1 0x3522
+#define PORT_SWITCH_ID_PROD_NUM_6085 0x04a
+#define PORT_SWITCH_ID_PROD_NUM_6095 0x095
+#define PORT_SWITCH_ID_PROD_NUM_6131 0x106
+#define PORT_SWITCH_ID_PROD_NUM_6320 0x115
+#define PORT_SWITCH_ID_PROD_NUM_6123 0x121
+#define PORT_SWITCH_ID_PROD_NUM_6161 0x161
+#define PORT_SWITCH_ID_PROD_NUM_6165 0x165
+#define PORT_SWITCH_ID_PROD_NUM_6171 0x171
+#define PORT_SWITCH_ID_PROD_NUM_6172 0x172
+#define PORT_SWITCH_ID_PROD_NUM_6175 0x175
+#define PORT_SWITCH_ID_PROD_NUM_6176 0x176
+#define PORT_SWITCH_ID_PROD_NUM_6185 0x1a7
+#define PORT_SWITCH_ID_PROD_NUM_6240 0x240
+#define PORT_SWITCH_ID_PROD_NUM_6321 0x310
+#define PORT_SWITCH_ID_PROD_NUM_6352 0x352
+#define PORT_SWITCH_ID_PROD_NUM_6350 0x371
+#define PORT_SWITCH_ID_PROD_NUM_6351 0x375
#define PORT_CONTROL 0x04
#define PORT_CONTROL_USE_CORE_TAG BIT(15)
#define PORT_CONTROL_DROP_ON_LOCK BIT(14)
@@ -367,9 +339,187 @@
#define MV88E6XXX_N_FID 4096
-struct mv88e6xxx_switch_id {
- u16 id;
- char *name;
+/* List of supported models */
+enum mv88e6xxx_model {
+ MV88E6085,
+ MV88E6095,
+ MV88E6123,
+ MV88E6131,
+ MV88E6161,
+ MV88E6165,
+ MV88E6171,
+ MV88E6172,
+ MV88E6175,
+ MV88E6176,
+ MV88E6185,
+ MV88E6240,
+ MV88E6320,
+ MV88E6321,
+ MV88E6350,
+ MV88E6351,
+ MV88E6352,
+};
+
+enum mv88e6xxx_family {
+ MV88E6XXX_FAMILY_NONE,
+ MV88E6XXX_FAMILY_6065, /* 6031 6035 6061 6065 */
+ MV88E6XXX_FAMILY_6095, /* 6092 6095 */
+ MV88E6XXX_FAMILY_6097, /* 6046 6085 6096 6097 */
+ MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
+ MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
+ MV88E6XXX_FAMILY_6320, /* 6320 6321 */
+ MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
+ MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
+};
+
+enum mv88e6xxx_cap {
+ /* Address Translation Unit.
+ * The ATU is used to look up and learn MAC addresses. See GLOBAL_ATU_OP.
+ */
+ MV88E6XXX_CAP_ATU,
+
+ /* Energy Efficient Ethernet.
+ */
+ MV88E6XXX_CAP_EEE,
+
+ /* EEPROM Command and Data registers.
+ * See GLOBAL2_EEPROM_OP and GLOBAL2_EEPROM_DATA.
+ */
+ MV88E6XXX_CAP_EEPROM,
+
+ /* Port State Filtering for 802.1D Spanning Tree.
+ * See PORT_CONTROL_STATE_* values in the PORT_CONTROL register.
+ */
+ MV88E6XXX_CAP_PORTSTATE,
+
+ /* PHY Polling Unit.
+ * See GLOBAL_CONTROL_PPU_ENABLE and GLOBAL_STATUS_PPU_POLLING.
+ */
+ MV88E6XXX_CAP_PPU,
+ MV88E6XXX_CAP_PPU_ACTIVE,
+
+ /* SMI PHY Command and Data registers.
+ * When present, PHY registers are accessed indirectly through
+ * GLOBAL2_SMI_OP; otherwise they are accessed directly.
+ */
+ MV88E6XXX_CAP_SMI_PHY,
+
+ /* Per VLAN Spanning Tree Unit (STU).
+ * The Port State database, if present, is accessed through VTU
+ * operations and dedicated SID registers. See GLOBAL_VTU_SID.
+ */
+ MV88E6XXX_CAP_STU,
+
+ /* Switch MAC/WoL/WoF register.
+ * When present, the switch MAC address is set indirectly through
+ * GLOBAL2_SWITCH_MAC; otherwise GLOBAL_MAC_01, GLOBAL_MAC_23,
+ * and GLOBAL_MAC_45 are written directly.
+ */
+ MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF,
+
+ /* Internal temperature sensor.
+ * Available from any enabled port's PHY register 26, page 6.
+ */
+ MV88E6XXX_CAP_TEMP,
+ MV88E6XXX_CAP_TEMP_LIMIT,
+
+ /* In-chip Port Based VLANs.
+ * Each port VLANTable register (see PORT_BASE_VLAN) is used to restrict
+ * the output (or egress) ports to which it is allowed to send frames.
+ */
+ MV88E6XXX_CAP_VLANTABLE,
+
+ /* VLAN Table Unit.
+ * The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP.
+ */
+ MV88E6XXX_CAP_VTU,
+};
+
+/* Bitmask of capabilities */
+#define MV88E6XXX_FLAG_ATU BIT(MV88E6XXX_CAP_ATU)
+#define MV88E6XXX_FLAG_EEE BIT(MV88E6XXX_CAP_EEE)
+#define MV88E6XXX_FLAG_EEPROM BIT(MV88E6XXX_CAP_EEPROM)
+#define MV88E6XXX_FLAG_PORTSTATE BIT(MV88E6XXX_CAP_PORTSTATE)
+#define MV88E6XXX_FLAG_PPU BIT(MV88E6XXX_CAP_PPU)
+#define MV88E6XXX_FLAG_PPU_ACTIVE BIT(MV88E6XXX_CAP_PPU_ACTIVE)
+#define MV88E6XXX_FLAG_SMI_PHY BIT(MV88E6XXX_CAP_SMI_PHY)
+#define MV88E6XXX_FLAG_STU BIT(MV88E6XXX_CAP_STU)
+#define MV88E6XXX_FLAG_SWITCH_MAC BIT(MV88E6XXX_CAP_SWITCH_MAC_WOL_WOF)
+#define MV88E6XXX_FLAG_TEMP BIT(MV88E6XXX_CAP_TEMP)
+#define MV88E6XXX_FLAG_TEMP_LIMIT BIT(MV88E6XXX_CAP_TEMP_LIMIT)
+#define MV88E6XXX_FLAG_VLANTABLE BIT(MV88E6XXX_CAP_VLANTABLE)
+#define MV88E6XXX_FLAG_VTU BIT(MV88E6XXX_CAP_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6095 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_PPU | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6097 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_PPU | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6165 \
+ (MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_SWITCH_MAC | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6185 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_PPU | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6320 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_EEPROM | \
+ MV88E6XXX_FLAG_PORTSTATE | \
+ MV88E6XXX_FLAG_PPU_ACTIVE | \
+ MV88E6XXX_FLAG_SMI_PHY | \
+ MV88E6XXX_FLAG_SWITCH_MAC | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_TEMP_LIMIT | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6351 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_PORTSTATE | \
+ MV88E6XXX_FLAG_PPU_ACTIVE | \
+ MV88E6XXX_FLAG_SMI_PHY | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_SWITCH_MAC | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+#define MV88E6XXX_FLAGS_FAMILY_6352 \
+ (MV88E6XXX_FLAG_ATU | \
+ MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_EEPROM | \
+ MV88E6XXX_FLAG_PORTSTATE | \
+ MV88E6XXX_FLAG_PPU_ACTIVE | \
+ MV88E6XXX_FLAG_SMI_PHY | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_SWITCH_MAC | \
+ MV88E6XXX_FLAG_TEMP | \
+ MV88E6XXX_FLAG_TEMP_LIMIT | \
+ MV88E6XXX_FLAG_VLANTABLE | \
+ MV88E6XXX_FLAG_VTU)
+
+struct mv88e6xxx_info {
+ enum mv88e6xxx_family family;
+ u16 prod_num;
+ const char *name;
+ unsigned int num_databases;
+ unsigned int num_ports;
+ unsigned long flags;
};
struct mv88e6xxx_atu_entry {
@@ -393,17 +543,29 @@ struct mv88e6xxx_vtu_stu_entry {
struct mv88e6xxx_priv_port {
struct net_device *bridge_dev;
- u8 state;
};
struct mv88e6xxx_priv_state {
+ const struct mv88e6xxx_info *info;
+
+ /* The dsa_switch this private structure is related to */
+ struct dsa_switch *ds;
+
+ /* The device this structure is associated to */
+ struct device *dev;
+
/* When using multi-chip addressing, this mutex protects
* access to the indirect access registers. (In single-chip
* mode, this mutex is effectively useless.)
*/
struct mutex smi_mutex;
-#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
+ /* The MII bus and the address on the bus that is used to
+ * communicate with the switch
+ */
+ struct mii_bus *bus;
+ int sw_addr;
+
/* Handles automatic disabling and re-enabling of the PHY
* polling unit.
*/
@@ -411,7 +573,6 @@ struct mv88e6xxx_priv_state {
int ppu_disabled;
struct work_struct ppu_work;
struct timer_list ppu_timer;
-#endif
/* This mutex serialises access to the statistics unit.
* Hold this mutex over snapshot + dump sequences.
@@ -429,14 +590,16 @@ struct mv88e6xxx_priv_state {
*/
struct mutex eeprom_mutex;
- int id; /* switch product id */
- int num_ports; /* number of switch ports */
-
struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS];
- DECLARE_BITMAP(port_state_update_mask, DSA_MAX_PORTS);
+ /* A switch may have a GPIO line tied to its reset pin. Parse
+ * this from the device tree, and use it before performing
+ * a switch soft reset.
+ */
+ struct gpio_desc *reset;
- struct work_struct bridge_work;
+ /* set to size of eeprom if supported by the switch */
+ int eeprom_len;
};
enum stat_type {
@@ -452,104 +615,10 @@ struct mv88e6xxx_hw_stat {
enum stat_type type;
};
-int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active);
-char *mv88e6xxx_lookup_name(struct device *host_dev, int sw_addr,
- const struct mv88e6xxx_switch_id *table,
- unsigned int num);
-int mv88e6xxx_setup_ports(struct dsa_switch *ds);
-int mv88e6xxx_setup_common(struct dsa_switch *ds);
-int mv88e6xxx_setup_global(struct dsa_switch *ds);
-int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
-int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
-int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
-int mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum);
-int mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val);
-int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum);
-int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
- u16 val);
-void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
-int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
- int regnum, u16 val);
-void mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
-void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
- uint64_t *data);
-int mv88e6xxx_get_sset_count(struct dsa_switch *ds);
-int mv88e6xxx_get_sset_count_basic(struct dsa_switch *ds);
-void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev);
-int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port);
-void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
- struct ethtool_regs *regs, void *_p);
-int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp);
-int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp);
-int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm);
-int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds);
-int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds);
-int mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr, int regnum);
-int mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr, int regnum,
- u16 val);
-int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e);
-int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
- struct phy_device *phydev, struct ethtool_eee *e);
-int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge);
-void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port);
-int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state);
-int mv88e6xxx_port_vlan_filtering(struct dsa_switch *ds, int port,
- bool vlan_filtering);
-int mv88e6xxx_port_vlan_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
-int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan,
- struct switchdev_trans *trans);
-int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_vlan *vlan);
-int mv88e6xxx_port_vlan_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_vlan *vlan,
- int (*cb)(struct switchdev_obj *obj));
-int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans);
-int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb,
- struct switchdev_trans *trans);
-int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
- const struct switchdev_obj_port_fdb *fdb);
-int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
- struct switchdev_obj_port_fdb *fdb,
- int (*cb)(struct switchdev_obj *obj));
-int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg);
-int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
- int reg, int val);
-
-extern struct dsa_switch_driver mv88e6131_switch_driver;
-extern struct dsa_switch_driver mv88e6123_switch_driver;
-extern struct dsa_switch_driver mv88e6352_switch_driver;
-extern struct dsa_switch_driver mv88e6171_switch_driver;
-
-#define REG_READ(addr, reg) \
- ({ \
- int __ret; \
- \
- __ret = mv88e6xxx_reg_read(ds, addr, reg); \
- if (__ret < 0) \
- return __ret; \
- __ret; \
- })
-
-#define REG_WRITE(addr, reg, val) \
- ({ \
- int __ret; \
- \
- __ret = mv88e6xxx_reg_write(ds, addr, reg, val); \
- if (__ret < 0) \
- return __ret; \
- })
-
-
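The REG_READ()/REG_WRITE() macros removed here were GCC statement expressions containing a bare return, so an error silently returned from whichever function expanded them. The .c hunks earlier in this diff replace every use with the explicit pattern below (a sketch mirroring the converted GLOBAL_STATS_OP write):

    err = _mv88e6xxx_reg_write(ps, REG_GLOBAL, GLOBAL_STATS_OP,
                               GLOBAL_STATS_OP_FLUSH_ALL);
    if (err)
            return err;     /* control flow is now visible at the call site */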
+static inline bool mv88e6xxx_has(struct mv88e6xxx_priv_state *ps,
+ unsigned long flags)
+{
+ return (ps->info->flags & flags) == flags;
+}
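Since mv88e6xxx_has() masks and compares against the full argument, passing several flags asks whether every one of them is present, not just any. A small usage sketch (illustrative only):

    /* true only if the chip has both an EEPROM and indirect SMI PHY access */
    if (mv88e6xxx_has(ps, MV88E6XXX_FLAG_EEPROM | MV88E6XXX_FLAG_SMI_PHY))
            /* ... */;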
#endif
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 7677c745f..91ada52f7 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -699,7 +699,7 @@ el3_tx_timeout (struct net_device *dev)
dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
inw(ioaddr + TX_FREE));
dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
outw(TxReset, ioaddr + EL3_CMD);
outw(TxEnable, ioaddr + EL3_CMD);
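The dev->trans_start = jiffies writes converted throughout the rest of this diff all become netif_trans_update(dev), which moves the TX watchdog timestamp behind a helper. As a sketch of what that helper does in this kernel generation (reconstructed from memory, so treat it as an assumption rather than the authoritative definition), it refreshes the first TX queue's trans_start and skips the store when it is already current:

    static inline void netif_trans_update(struct net_device *dev)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

            if (txq->trans_start != jiffies)
                    txq->trans_start = jiffies;     /* avoid redundant stores */
    }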
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 942fb0d5a..b26e038b4 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -992,7 +992,7 @@ static void corkscrew_timeout(struct net_device *dev)
if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
break;
outw(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index b9948f00c..b88afd759 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -700,7 +700,7 @@ static void el3_tx_timeout(struct net_device *dev)
netdev_notice(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc574_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index c5a320507..71396e4b8 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -534,7 +534,7 @@ static void el3_tx_timeout(struct net_device *dev)
netdev_warn(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc589_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index d81fceddb..25c55ab05 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1944,7 +1944,7 @@ static void vortex_tx_timeout(struct net_device *dev)
}
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
/*
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index ec6eac1f8..4ea717d68 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -1041,7 +1041,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
{
ei_local->txing = 1;
NS8390_trigger_send(dev, send_length, output_page);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (output_page == ei_local->tx_start_page)
{
ei_local->tx1 = -1;
@@ -1270,7 +1270,7 @@ static void ei_tx_intr(struct net_device *dev)
{
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx2 = -1,
ei_local->lasttx = 2;
}
@@ -1287,7 +1287,7 @@ static void ei_tx_intr(struct net_device *dev)
{
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
}
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index b96e8852b..60f8e2c8e 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -596,7 +596,7 @@ static void ei_tx_intr(struct net_device *dev)
if (ei_local->tx2 > 0) {
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx2 = -1,
ei_local->lasttx = 2;
} else
@@ -609,7 +609,7 @@ static void ei_tx_intr(struct net_device *dev)
if (ei_local->tx1 > 0) {
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
} else
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index e498eb0b9..2f9258e3e 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1128,7 +1128,7 @@ static void tx_timeout(struct net_device *dev)
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 74139cb7f..3d2245fdc 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1430,7 +1430,7 @@ static void bfin_mac_timeout(struct net_device *dev)
bfin_mac_enable(lp->phydev);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
static void bfin_mac_multicast_hash(struct net_device *dev)
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index b873531c5..bca07c5c9 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1323,7 +1323,7 @@ static inline int phy_aneg_done(struct phy_device *phydev)
static int greth_mdio_init(struct greth_private *greth)
{
- int ret, phy;
+ int ret;
unsigned long timeout;
greth->mdio = mdiobus_alloc();
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 0907ab6ff..821d86c38 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3349,7 +3349,7 @@ static void et131x_down(struct net_device *netdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
/* Save the timestamp for the TX watchdog, prevent a timeout */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
phy_stop(adapter->phydev);
et131x_disable_txrx(netdev);
@@ -3816,7 +3816,7 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
netif_stop_queue(netdev);
/* Save the timestamp for the TX timeout watchdog */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* TCB is not available */
if (tx_ring->used >= NUM_TCB)
@@ -3851,7 +3851,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
unsigned long flags;
/* If the device is closed, ignore the timeout */
- if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+ if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
return;
/* Any nonrecoverable hardware error?
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 8d50314ac..de2c4bf5f 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -428,7 +428,7 @@ static void emac_timeout(struct net_device *dev)
emac_reset(db);
emac_init_device(dev);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
/* Restore previous register address */
@@ -468,7 +468,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
db->membase + EMAC_TX_CTL0_REG);
/* save the time stamp */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else if (channel == 1) {
/* set TX len */
writel(skb->len, db->membase + EMAC_TX_PL1_REG);
@@ -477,7 +477,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
db->membase + EMAC_TX_CTL1_REG);
/* save the time stamp */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
if ((db->tx_fifo_stat & 3) == 3) {
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 66d0b73c3..dcf2a1f36 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -260,7 +260,7 @@ static int lance_reset(struct net_device *dev)
load_csrs(lp);
lance_init_ring(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
printk("Lance restart=%d\n", status);
@@ -530,7 +530,7 @@ void lance_tx_timeout(struct net_device *dev)
{
printk("lance_tx_timeout\n");
lance_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
@@ -543,11 +543,13 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
static int outs;
unsigned long flags;
- if (!TX_BUFFS_AVAIL)
- return NETDEV_TX_LOCKED;
-
netif_stop_queue(dev);
+ if (!TX_BUFFS_AVAIL) {
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
skblen = skb->len;
#ifdef DEBUG_DRIVER
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 56139184b..a83cd1c4c 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -512,7 +512,7 @@ static inline int lance_reset(struct net_device *dev)
load_csrs(lp);
lance_init_ring(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_start_queue(dev);
status = init_restart_lance(lp);
@@ -547,10 +547,8 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
local_irq_save(flags);
- if (!lance_tx_buffs_avail(lp)) {
- local_irq_restore(flags);
- return NETDEV_TX_LOCKED;
- }
+ if (!lance_tx_buffs_avail(lp))
+ goto out_free;
#ifdef DEBUG
/* dump the packet */
@@ -573,6 +571,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
/* Kick the lance: transmit now */
ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+ out_free:
dev_kfree_skb(skb);
local_irq_restore(flags);
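The 7990 and a2065 hunks above share a theme: NETDEV_TX_LOCKED is being retired, so a transmit path that cannot make progress must now either stop the queue and return NETDEV_TX_BUSY, or consume the skb and return NETDEV_TX_OK. A hedged sketch of the resulting shape (tx_buffs_avail() is a hypothetical helper):

    static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
    {
            if (!tx_buffs_avail(dev)) {
                    dev_consume_skb_any(skb);       /* drop; no retry from the core */
                    return NETDEV_TX_OK;
            }
            /* ... queue the frame to hardware ... */
            return NETDEV_TX_OK;
    }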
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index b10964e8c..d2bc8e5dc 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -764,7 +764,7 @@ static void lance_tx_timeout (struct net_device *dev)
/* lance_restart, essentially */
lance_init_ring(dev);
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index d3977d032..20760e102 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -509,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev)
* on the current MAC's MII bus
*/
for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
- if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
- phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+ if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+ phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
if (!aup->phy_search_highest_addr)
/* break out with first one found */
break;
@@ -1074,7 +1074,7 @@ static void au1000_tx_timeout(struct net_device *dev)
netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
au1000_reset_mac(dev);
au1000_init(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1269,7 +1269,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy_irq = pd->phy_irq;
}
- if (aup->phy_busid && aup->phy_busid > 0) {
+ if (aup->phy_busid > 0) {
dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
err = -ENODEV;
goto err_mdiobus_alloc;
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index b584b7823..b799c7ac8 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -877,7 +877,7 @@ static inline int lance_reset(struct net_device *dev)
lance_init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 3a7ebfdda..abb1ba228 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -943,7 +943,7 @@ static void lance_tx_timeout (struct net_device *dev)
#endif
lance_restart (dev, 0x0043, 1);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 1cf33addd..cda53db75 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -782,7 +782,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
if(!p->lock)
if (p->tmdnum || !p->xmit_queued)
netif_wake_queue(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
else
writedatareg(CSR0_STRT | csr0);
@@ -1148,7 +1148,7 @@ static void ni65_timeout(struct net_device *dev)
printk("%02x ",p->tmdhead[i].u.s.status);
printk("\n");
ni65_lance_reinit(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 27245efe9..2807e1816 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -851,7 +851,7 @@ static void mace_tx_timeout(struct net_device *dev)
#else /* #if RESET_ON_TIMEOUT */
pr_cont("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 7ccebae9c..c22bf52d3 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
napi_disable(&lp->napi);
netif_tx_disable(dev);
}
@@ -2426,7 +2426,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
}
pcnet32_restart(dev, CSR0_NORMAL);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 7847638bd..9b56b4025 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -997,7 +997,7 @@ static int lance_reset(struct net_device *dev)
}
lp->init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
index 11be8044e..472c0fb3f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
@@ -730,6 +730,6 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
return xgene_cle_setup_ptree(pdata, enet_cle);
}
-struct xgene_cle_ops xgene_cle3in_ops = {
+const struct xgene_cle_ops xgene_cle3in_ops = {
.cle_init = xgene_enet_cle_init,
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
index 3bf906832..33c5f6b25 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
@@ -292,6 +292,6 @@ struct xgene_enet_cle {
u32 jump_bytes;
};
-extern struct xgene_cle_ops xgene_cle3in_ops;
+extern const struct xgene_cle_ops xgene_cle3in_ops;
#endif /* __XGENE_ENET_CLE_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 513d2a62e..2f5638f7f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -827,7 +827,7 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
return -EINVAL;
phy = get_phy_device(mdio, phy_id, false);
- if (!phy || IS_ERR(phy))
+ if (IS_ERR(phy))
return -EIO;
ret = phy_device_register(phy);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index fd200883d..d208b172f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -973,6 +973,17 @@ static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
return owner;
}
+static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+ u32 cpu_bufnum;
+ int ret;
+
+ ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
+
+ return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
+}
+
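xgene_start_cpu_bufnum() reads an optional "channel" device property and keeps the firmware-provided default when it is absent; device_property_read_u32() returns zero only on success. A one-line usage sketch of the same fallback:

    u32 bufnum;

    if (device_property_read_u32(dev, "channel", &bufnum))
            bufnum = pdata->cpu_bufnum;     /* property absent: keep default */
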
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
@@ -981,13 +992,15 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
struct xgene_enet_desc_ring *buf_pool = NULL;
enum xgene_ring_owner owner;
dma_addr_t dma_exp_bufs;
- u8 cpu_bufnum = pdata->cpu_bufnum;
+ u8 cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
u16 ring_num = pdata->ring_num;
u16 ring_id;
int i, ret, size;
+ cpu_bufnum = xgene_start_cpu_bufnum(pdata);
+
for (i = 0; i < pdata->rxq_cnt; i++) {
/* allocate rx descriptor ring */
owner = xgene_derive_ring_owner(pdata);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 9d9cf4451..092fbecca 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -201,7 +201,7 @@ struct xgene_enet_pdata {
const struct xgene_mac_ops *mac_ops;
const struct xgene_port_ops *port_ops;
struct xgene_ring_ops *ring_ops;
- struct xgene_cle_ops *cle_ops;
+ const struct xgene_cle_ops *cle_ops;
struct delayed_work link_work;
u32 port_id;
u8 cpu_bufnum;
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 16419f550..058460bdd 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus",
+ bus->name = "Synopsys MII Bus";
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
index d02c4240b..8fc93c5f6 100644
--- a/drivers/net/ethernet/atheros/alx/alx.h
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -96,10 +96,6 @@ struct alx_priv {
unsigned int rx_ringsz;
unsigned int rxbuf_size;
- struct page *rx_page;
- unsigned int rx_page_offset;
- unsigned int rx_frag_size;
-
struct napi_struct napi;
struct alx_tx_queue txq;
struct alx_rx_queue rxq;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 8611811fb..e708e360a 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -70,35 +70,6 @@ static void alx_free_txbuf(struct alx_priv *alx, int entry)
}
}
-static struct sk_buff *alx_alloc_skb(struct alx_priv *alx, gfp_t gfp)
-{
- struct sk_buff *skb;
- struct page *page;
-
- if (alx->rx_frag_size > PAGE_SIZE)
- return __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
-
- page = alx->rx_page;
- if (!page) {
- alx->rx_page = page = alloc_page(gfp);
- if (unlikely(!page))
- return NULL;
- alx->rx_page_offset = 0;
- }
-
- skb = build_skb(page_address(page) + alx->rx_page_offset,
- alx->rx_frag_size);
- if (likely(skb)) {
- alx->rx_page_offset += alx->rx_frag_size;
- if (alx->rx_page_offset >= PAGE_SIZE)
- alx->rx_page = NULL;
- else
- get_page(page);
- }
- return skb;
-}
-
-
static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
{
struct alx_rx_queue *rxq = &alx->rxq;
@@ -115,9 +86,22 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
while (!cur_buf->skb && next != rxq->read_idx) {
struct alx_rfd *rfd = &rxq->rfd[cur];
- skb = alx_alloc_skb(alx, gfp);
+ /*
+ * When the DMA RX address is set to something like
+ * 0x....fc0, it is very likely to cause a DMA
+ * RFD overflow issue.
+ *
+ * To work around it, we allocate the rx skb with 64
+ * bytes of extra space, and offset the address whenever
+ * 0x....fc0 is detected.
+ */
+ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
if (!skb)
break;
+
+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+ skb_reserve(skb, 64);
+
dma = dma_map_single(&alx->hw.pdev->dev,
skb->data, alx->rxbuf_size,
DMA_FROM_DEVICE);
@@ -153,7 +137,6 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
}
-
return count;
}
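A worked example of the RFD workaround above: only buffers whose data address ends in 0xfc0 modulo 4 KiB trigger the 64-byte skb_reserve(), and the 64 extra bytes allocated up front guarantee room for that shift (address value assumed for illustration):

    unsigned long addr = 0x12fc0;           /* hypothetical skb->data address */

    if ((addr & 0xfff) == 0xfc0)
            addr += 64;                     /* skb_reserve(skb, 64): now 0x13000 */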
@@ -622,11 +605,6 @@ static void alx_free_rings(struct alx_priv *alx)
kfree(alx->txq.bufs);
kfree(alx->rxq.bufs);
- if (alx->rx_page) {
- put_page(alx->rx_page);
- alx->rx_page = NULL;
- }
-
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,
@@ -681,7 +659,6 @@ static int alx_request_irq(struct alx_priv *alx)
alx->dev->name, alx);
if (!err)
goto out;
-
/* fall back to legacy interrupt */
pci_disable_msi(alx->hw.pdev);
}
@@ -725,7 +702,6 @@ static int alx_init_sw(struct alx_priv *alx)
struct pci_dev *pdev = alx->hw.pdev;
struct alx_hw *hw = &alx->hw;
int err;
- unsigned int head_size;
err = alx_identify_hw(alx);
if (err) {
@@ -741,12 +717,7 @@ static int alx_init_sw(struct alx_priv *alx)
hw->smb_timer = 400;
hw->mtu = alx->dev->mtu;
-
alx->rxbuf_size = ALX_MAX_FRAME_LEN(hw->mtu);
- head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- alx->rx_frag_size = roundup_pow_of_two(head_size);
-
alx->tx_ringsz = 256;
alx->rx_ringsz = 512;
hw->imt = 200;
@@ -787,7 +758,7 @@ static netdev_features_t alx_fix_features(struct net_device *netdev,
static void alx_netif_stop(struct alx_priv *alx)
{
- alx->dev->trans_start = jiffies;
+ netif_trans_update(alx->dev);
if (netif_carrier_ok(alx->dev)) {
netif_carrier_off(alx->dev);
netif_tx_disable(alx->dev);
@@ -848,7 +819,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
{
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
- unsigned int head_size;
if ((max_frame < ALX_MIN_FRAME_SIZE) ||
(max_frame > ALX_MAX_FRAME_SIZE))
@@ -860,9 +830,6 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
netdev->mtu = mtu;
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
- head_size = SKB_DATA_ALIGN(alx->rxbuf_size + NET_SKB_PAD) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- alx->rx_frag_size = roundup_pow_of_two(head_size);
netdev_update_features(netdev);
if (netif_running(netdev))
alx_reinit(alx);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index b9203d928..c46b489ce 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -488,7 +488,7 @@ struct atl1c_tpd_ring {
dma_addr_t dma; /* descriptor ring physical address */
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
- u16 next_to_use; /* this is protectd by adapter->tx_lock */
+ u16 next_to_use;
atomic_t next_to_clean;
struct atl1c_buffer *buffer_info;
};
@@ -542,7 +542,6 @@ struct atl1c_adapter {
u16 link_duplex;
spinlock_t mdio_lock;
- spinlock_t tx_lock;
atomic_t irq_sem;
struct work_struct common_task;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index d0084d4d1..a3200ea6d 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -821,7 +821,6 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
atl1c_set_rxbufsize(adapter, adapter->netdev);
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->mdio_lock);
- spin_lock_init(&adapter->tx_lock);
set_bit(__AT_DOWN, &adapter->flags);
return 0;
@@ -2206,7 +2205,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
u16 tpd_req = 1;
struct atl1c_tpd_desc *tpd;
enum atl1c_trans_queue type = atl1c_trans_normal;
@@ -2217,16 +2215,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
}
tpd_req = atl1c_cal_tpd_req(skb);
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
- if (netif_msg_pktdata(adapter))
- dev_info(&adapter->pdev->dev, "tx locked\n");
- return NETDEV_TX_LOCKED;
- }
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* not enough descriptors, just stop the queue */
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -2234,7 +2226,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
/* do TSO and check sum */
if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2257,12 +2248,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
"tx-skb droppted due to dma error\n");
/* roll back tpd/buffer */
atl1c_tx_rollback(adapter, tpd, type);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
} else {
netdev_sent_queue(adapter->netdev, skb->len);
atl1c_tx_queue(adapter, skb, tpd, type);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
}
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 0212dac7e..632bb843a 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -442,7 +442,6 @@ struct atl1e_adapter {
u16 link_duplex;
spinlock_t mdio_lock;
- spinlock_t tx_lock;
atomic_t irq_sem;
struct work_struct reset_task;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 59a03a193..974713b19 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -648,7 +648,6 @@ static int atl1e_sw_init(struct atl1e_adapter *adapter)
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->mdio_lock);
- spin_lock_init(&adapter->tx_lock);
set_bit(__AT_DOWN, &adapter->flags);
@@ -1866,7 +1865,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
u16 tpd_req = 1;
struct atl1e_tpd_desc *tpd;
@@ -1880,13 +1878,10 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
tpd_req = atl1e_cal_tdp_req(skb);
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
- return NETDEV_TX_LOCKED;
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* not enough descriptors, just stop the queue */
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -1910,7 +1905,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
/* do TSO and check sum */
if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1921,10 +1915,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
}
atl1e_tx_queue(adapter, tpd_req, tpd);
-
- netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
out:
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -2285,8 +2276,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features | NETIF_F_LLTX |
- NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX;
/* not enabled by default */
netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
return 0;
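
The atl1c and atl1e changes above are two halves of the same cleanup: once NETIF_F_LLTX is dropped, the core takes the per-queue xmit lock around ndo_start_xmit and maintains trans_start itself, so the drivers' private tx_lock, the NETDEV_TX_LOCKED return, and the manual trans_start update all become dead weight. A minimal sketch of the resulting xmit shape (the example_* names are hypothetical, not the drivers' exact code):

	static netdev_tx_t example_xmit(struct sk_buff *skb,
					struct net_device *netdev)
	{
		struct example_adapter *adapter = netdev_priv(netdev);

		/* the core already holds the tx queue lock here:
		 * no trylock, no NETDEV_TX_LOCKED return
		 */
		if (example_tpd_avail(adapter) < example_cal_tpd_req(skb)) {
			netif_stop_queue(netdev);
			return NETDEV_TX_BUSY;
		}
		example_tx_queue(adapter, skb);
		return NETDEV_TX_OK;
	}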
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 08a23e6b6..1a3555d03 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -259,6 +259,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
if (err) {
netdev_err(dev, "rx buffer allocation failed\n");
dev->stats.rx_dropped++;
+ dev_kfree_skb(skb);
return;
}
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 993c780bd..bfa26a259 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -392,7 +392,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ data[i] = *(unsigned long *)p;
}
}
@@ -831,7 +831,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* re-enable RX interrupts */
intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
}
@@ -873,7 +873,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (likely(napi_schedule_prep(&priv->napi))) {
/* disable RX interrupts */
intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
- __napi_schedule(&priv->napi);
+ __napi_schedule_irqoff(&priv->napi);
}
}
@@ -916,7 +916,7 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
if (likely(napi_schedule_prep(&txr->napi))) {
intrl2_1_mask_set(priv, BIT(ring));
- __napi_schedule(&txr->napi);
+ __napi_schedule_irqoff(&txr->napi);
}
}
@@ -1117,7 +1117,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev)
{
netdev_warn(dev, "transmit timeout!\n");
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_errors++;
netif_tx_wake_all_queues(dev);
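
The bcmsysport hunks follow a common NAPI modernization: napi_complete_done() reports how much work was actually done, and __napi_schedule_irqoff() skips a redundant local_irq_save() because a hard-IRQ handler already runs with interrupts disabled. The generic pattern, as a sketch (example_* names are placeholders for the device-specific pieces):

	static irqreturn_t example_isr(int irq, void *dev_id)
	{
		struct example_priv *priv = dev_id;

		if (likely(napi_schedule_prep(&priv->napi))) {
			example_mask_rx_irq(priv);		/* device-specific */
			__napi_schedule_irqoff(&priv->napi);	/* irqs already off */
		}
		return IRQ_HANDLED;
	}

	static int example_poll(struct napi_struct *napi, int budget)
	{
		struct example_priv *priv =
			container_of(napi, struct example_priv, napi);
		int work_done = example_rx(priv, budget);

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			example_unmask_rx_irq(priv);
		}
		return work_done;
	}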
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 38db2e4d7..25bbae592 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -231,7 +231,7 @@ err_dma:
dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
DMA_TO_DEVICE);
- while (i > 0) {
+ while (i-- > 0) {
int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
@@ -267,15 +267,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
while (ring->start != ring->end) {
int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[slot_idx];
- u32 ctl1;
+ u32 ctl0, ctl1;
int len;
if (slot_idx == empty_slot)
break;
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
len = ctl1 & BGMAC_DESC_CTL1_LEN;
- if (ctl1 & BGMAC_DESC_CTL0_SOF)
+ if (ctl0 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
dma_unmap_single(dma_dev, slot->dma_addr, len,
DMA_TO_DEVICE);
@@ -1312,7 +1313,8 @@ static int bgmac_open(struct net_device *net_dev)
phy_start(bgmac->phy_dev);
- netif_carrier_on(net_dev);
+ netif_start_queue(net_dev);
+
return 0;
}
@@ -1515,7 +1517,7 @@ static int bgmac_mii_register(struct bgmac *bgmac)
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- bgmac_err(bgmac, "PHY connecton failed\n");
+ bgmac_err(bgmac, "PHY connection failed\n");
err = PTR_ERR(phy_dev);
goto err_unregister_bus;
}
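
Two distinct bgmac bugs are fixed above. First, the DMA-mapping error unwind never decremented i, so the old while (i > 0) spun forever; while (i-- > 0) walks fragments i-1 down to 0 and unmaps each exactly once. Second, bgmac_dma_tx_free() tested the start-of-frame bit in the wrong descriptor word: BGMAC_DESC_CTL0_SOF lives in ctl0, but the old code masked it against ctl1, so head buffers could be misclassified and unmapped with the wrong primitive. A sketch of the corrected unwind (assuming, as the context suggests, that these slots hold fragment page mappings):

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}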
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5f2cd0eb9..9089404bb 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12889,52 +12889,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
return rc;
}
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
{
struct bnx2x_vlan_entry *vlan;
int rc = 0;
- if (!bp->vlan_cnt) {
- DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
- return 0;
- }
-
+ /* Configure all non-configured entries */
list_for_each_entry(vlan, &bp->vlan_reg, link) {
- /* Prepare for cleanup in case of errors */
- if (rc) {
- vlan->hw = false;
- continue;
- }
-
- if (!vlan->hw)
+ if (vlan->hw)
continue;
- DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+ if (bp->vlan_cnt >= bp->vlan_credit)
+ return -ENOBUFS;
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
- BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
- vlan->hw = false;
- rc = -EINVAL;
- continue;
+ BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+ return rc;
}
+
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+ vlan->hw = true;
+ bp->vlan_cnt++;
}
- return rc;
+ return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+ bool need_accept_any_vlan;
+
+ need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+ if (bp->accept_any_vlan != need_accept_any_vlan) {
+ bp->accept_any_vlan = need_accept_any_vlan;
+ DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+ bp->accept_any_vlan ? "raised" : "cleared");
+ if (set_rx_mode) {
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+ }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* The hw forgot all entries after reload */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+ bp->vlan_cnt = 0;
+
+ /* Don't set rx mode here. Our caller will do it. */
+ bnx2x_vlan_configure(bp, false);
+
+ return 0;
}
static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
- bool hw = false;
- int rc = 0;
-
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
@@ -12942,93 +12961,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
if (!vlan)
return -ENOMEM;
- bp->vlan_cnt++;
- if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
- DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
- bp->accept_any_vlan = true;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- } else if (bp->vlan_cnt <= bp->vlan_credit) {
- rc = __bnx2x_vlan_configure_vid(bp, vid, true);
- hw = true;
- }
-
vlan->vid = vid;
- vlan->hw = hw;
+ vlan->hw = false;
+ list_add_tail(&vlan->link, &bp->vlan_reg);
- if (!rc) {
- list_add(&vlan->link, &bp->vlan_reg);
- } else {
- bp->vlan_cnt--;
- kfree(vlan);
- }
-
- DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
- return rc;
+ return 0;
}
static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
+ bool found = false;
int rc = 0;
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
-
DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
- if (!bp->vlan_cnt) {
- BNX2X_ERR("Unable to kill VLAN %d\n", vid);
- return -EINVAL;
- }
-
list_for_each_entry(vlan, &bp->vlan_reg, link)
- if (vlan->vid == vid)
+ if (vlan->vid == vid) {
+ found = true;
break;
+ }
- if (vlan->vid != vid) {
+ if (!found) {
BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
return -EINVAL;
}
- if (vlan->hw)
+ if (netif_running(dev) && vlan->hw) {
rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+ DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+ bp->vlan_cnt--;
+ }
list_del(&vlan->link);
kfree(vlan);
- bp->vlan_cnt--;
-
- if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
- /* Configure all non-configured entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link) {
- if (vlan->hw)
- continue;
-
- rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
- if (rc) {
- BNX2X_ERR("Unable to config VLAN %d\n",
- vlan->vid);
- continue;
- }
- DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
- vlan->vid);
- vlan->hw = true;
- }
- DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
- bp->accept_any_vlan = false;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- }
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
@@ -13253,12 +13226,11 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!chip_is_e1x) {
dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ NETIF_F_GSO_IPXIP4;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
- NETIF_F_GSO_IPIP |
- NETIF_F_GSO_SIT |
+ NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
}
@@ -13878,14 +13850,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bp->doorbells = bnx2x_vf_doorbells(bp);
rc = bnx2x_vf_pci_alloc(bp);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
} else {
doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
if (doorbell_size > pci_resource_len(pdev, 2)) {
dev_err(&bp->pdev->dev,
"Cannot map doorbells, bar size too small, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
doorbell_size);
@@ -13894,19 +13866,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
if (IS_VF(bp)) {
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
}
/* Enable SRIOV if capability found in configuration space */
rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
@@ -13925,7 +13897,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = bnx2x_set_int_mode(bp);
if (rc) {
dev_err(&pdev->dev, "Cannot set interrupts\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("set interrupts successfully\n");
@@ -13933,7 +13905,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
@@ -13966,6 +13938,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
+init_one_freemem:
+ bnx2x_free_mem_bp(bp);
+
init_one_exit:
bnx2x_disable_pcie_error_reporting(bp);
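
The old init_one_exit path only disabled PCIe error reporting, so every failure after the driver-private allocations leaked them; the new init_one_freemem label releases them (via bnx2x_free_mem_bp()) before falling through to the old cleanup. This is the standard kernel unwind-ladder idiom, sketched generically (alloc_a/alloc_b and their free counterparts are hypothetical):

	rc = alloc_a(bp);
	if (rc)
		goto out;
	rc = alloc_b(bp);
	if (rc)
		goto free_a;	/* b failed: undo only a */
	return 0;

	free_a:
		free_a(bp);	/* labels fall through in reverse order */
	out:
		return rc;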
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c39a7f5c6..c777cde85 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -78,6 +78,7 @@ enum board_idx {
BCM57402,
BCM57404,
BCM57406,
+ BCM57314,
BCM57304_VF,
BCM57404_VF,
};
@@ -92,6 +93,7 @@ static const struct {
{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
};
@@ -103,6 +105,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+ { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
@@ -118,6 +121,13 @@ static const u16 bnxt_vf_req_snif[] = {
HWRM_CFA_L2_FILTER_ALLOC,
};
+static const u16 bnxt_async_events_arr[] = {
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+};
+
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == BCM57304_VF || idx == BCM57404_VF);
@@ -276,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
txr->tx_prod = prod;
+ tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
+ wmb(); /* Sync is_push and byte queue before pushing data */
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
@@ -288,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len);
}
- tx_buf->is_push = 1;
goto tx_done;
}
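
The reordering above is subtle: tx_buf->is_push is now set before the descriptor is pushed through the doorbell, and the wmb() makes both the flag and the BQL accounting from netdev_tx_sent_queue() visible before the device can complete the packet. Previously the flag was set after the push, leaving a window in which a fast completion could observe a push buffer with is_push still 0. The intended ordering, as a sketch (the doorbell copy line is illustrative, not the driver's exact call):

	tx_buf->is_push = 1;
	netdev_tx_sent_queue(txq, skb->len);
	wmb();	/* publish flag and BQL update before the device sees data */
	__iowrite64_copy(db, tx_push, push_len);	/* push into doorbell BAR */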
@@ -1102,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
- netdev_features_t features = skb->dev->features;
+ if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD)) {
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- tpa_info->metadata &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
- }
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@@ -1267,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
skb->protocol = eth_type_trans(skb, dev);
- if (rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
- netdev_features_t features = skb->dev->features;
+ if ((rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD))
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- meta_data &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@@ -1313,6 +1313,10 @@ next_rx_no_prod:
return rc;
}
+#define BNXT_GET_EVENT_PORT(data) \
+ ((data) & \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
@@ -1320,12 +1324,40 @@ static int bnxt_async_event_process(struct bnxt *bp,
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ goto async_event_process_exit;
+ if (data1 & 0x20000) {
+ u16 fw_speed = link_info->force_link_speed;
+ u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
+
+ netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+ speed);
+ }
+ /* fall thru */
+ }
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+ u16 port_id = BNXT_GET_EVENT_PORT(data1);
+
+ if (BNXT_VF(bp))
+ break;
+
+ if (bp->pf.port_id != port_id)
+ break;
+
+ set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
+ break;
+ }
default:
netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
event_id);
@@ -1452,7 +1484,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
/* The valid test of the entry must be done first before
* reading any further.
*/
- rmb();
+ dma_rmb();
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
@@ -2729,7 +2761,7 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
int timeout, bool silent)
{
- int i, intr_process, rc;
+ int i, intr_process, rc, tmo_count;
struct input *req = msg;
u32 *data = msg;
__le32 *resp_len, *valid;
@@ -2758,11 +2790,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
timeout = DFLT_HWRM_CMD_TIMEOUT;
i = 0;
+ tmo_count = timeout * 40;
if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
- i++ < timeout) {
- usleep_range(600, 800);
+ i++ < tmo_count) {
+ usleep_range(25, 40);
}
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
@@ -2773,30 +2806,30 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
} else {
/* Check if response len is updated */
resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
- for (i = 0; i < timeout; i++) {
+ for (i = 0; i < tmo_count; i++) {
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
if (len)
break;
- usleep_range(600, 800);
+ usleep_range(25, 40);
}
- if (i >= timeout) {
+ if (i >= tmo_count) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
timeout, le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), *resp_len);
+ le16_to_cpu(req->seq_id), len);
return -1;
}
/* Last word of resp contains valid bit */
valid = bp->hwrm_cmd_resp_addr + len - 4;
- for (i = 0; i < timeout; i++) {
+ for (i = 0; i < 5; i++) {
if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
break;
- usleep_range(600, 800);
+ udelay(1);
}
- if (i >= timeout) {
+ if (i >= 5) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
timeout, le16_to_cpu(req->req_type),
le16_to_cpu(req->seq_id), len, *valid);
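
The timing change keeps the overall HWRM deadline while polling far more often: with the timeout expressed in milliseconds, 40 polls of at least 25 us each cover exactly 1 ms, so tmo_count = timeout * 40 preserves the budget while cutting per-poll latency from roughly 600 us to 25 us; the final valid-bit wait shrinks to five 1 us spins, since by then the response length is already visible. The arithmetic, as a sketch (the readiness predicate is hypothetical):

	int tmo_count = timeout * 40;	/* 40 * 25 us == 1 ms per timeout unit */

	for (i = 0; i < tmo_count; i++) {
		if (example_resp_len_ready(bp))	/* hypothetical predicate */
			break;
		usleep_range(25, 40);
	}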
@@ -2842,6 +2875,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
struct hwrm_func_drv_rgtr_input req = {0};
int i;
+ DECLARE_BITMAP(async_events_bmap, 256);
+ u32 *events = (u32 *)async_events_bmap;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
@@ -2850,11 +2885,14 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
FUNC_DRV_RGTR_REQ_ENABLES_VER |
FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
- /* TODO: current async event fwd bits are not defined and the firmware
- * only checks if it is non-zero to enable async event forwarding
- */
- req.async_event_fwd[0] |= cpu_to_le32(1);
- req.os_type = cpu_to_le16(1);
+ memset(async_events_bmap, 0, sizeof(async_events_bmap));
+ for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
+ __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+
+ for (i = 0; i < 8; i++)
+ req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+
+ req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
req.ver_maj = DRV_VER_MAJ;
req.ver_min = DRV_VER_MIN;
req.ver_upd = DRV_VER_UPD;
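
Instead of the old "any non-zero value enables forwarding" hack, the driver now registers exactly the async events it handles. A 256-bit bitmap is eight 32-bit words, which is why the copy loop runs to 8; each id from bnxt_async_events_arr[] sets one bit (a condensed sketch of the hunk above):

	DECLARE_BITMAP(async_events_bmap, 256);	/* 256 bits == 8 u32 words */
	u32 *events = (u32 *)async_events_bmap;

	bitmap_zero(async_events_bmap, 256);
	for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
		__set_bit(bnxt_async_events_arr[i], async_events_bmap);
	for (i = 0; i < 8; i++)
		req.async_event_fwd[i] |= cpu_to_le32(events[i]);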
@@ -3817,7 +3855,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->fw_fid = le16_to_cpu(resp->fid);
pf->port_id = le16_to_cpu(resp->port_id);
- memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -3842,7 +3880,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = le16_to_cpu(resp->fid);
- memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
if (is_valid_ether_addr(vf->mac_addr))
/* overwrite netdev dev_addr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
@@ -3933,6 +3971,8 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+ bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
+ resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
if (resp->hwrm_intf_maj < 1) {
netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
@@ -4589,12 +4629,49 @@ static void bnxt_report_link(struct bnxt *bp)
speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
speed, duplex, flow_ctrl);
+ if (bp->flags & BNXT_FLAG_EEE_CAP)
+ netdev_info(bp->dev, "EEE is %s\n",
+ bp->eee.eee_active ? "active" :
+ "not active");
} else {
netif_carrier_off(bp->dev);
netdev_err(bp->dev, "NIC Link is Down\n");
}
}
+static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_port_phy_qcaps_input req = {0};
+ struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+
+ if (bp->hwrm_spec_code < 0x10201)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_phy_qcaps_exit;
+
+ if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
+ struct ethtool_eee *eee = &bp->eee;
+ u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
+
+ bp->flags |= BNXT_FLAG_EEE_CAP;
+ eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
+ PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
+ bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
+ PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
+ }
+
+hwrm_phy_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
int rc = 0;
@@ -4626,7 +4703,6 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
else
link_info->link_speed = 0;
link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
- link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
link_info->lp_auto_link_speeds =
@@ -4636,9 +4712,47 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->phy_ver[1] = resp->phy_min;
link_info->phy_ver[2] = resp->phy_bld;
link_info->media_type = resp->media_type;
- link_info->transceiver = resp->transceiver_type;
- link_info->phy_addr = resp->phy_addr;
+ link_info->phy_type = resp->phy_type;
+ link_info->transceiver = resp->xcvr_pkg_type;
+ link_info->phy_addr = resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
+ link_info->module_status = resp->module_status;
+
+ if (bp->flags & BNXT_FLAG_EEE_CAP) {
+ struct ethtool_eee *eee = &bp->eee;
+ u16 fw_speeds;
+
+ eee->eee_active = 0;
+ if (resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
+ eee->eee_active = 1;
+ fw_speeds = le16_to_cpu(
+ resp->link_partner_adv_eee_link_speed_mask);
+ eee->lp_advertised =
+ _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ }
+
+ /* Pull initial EEE config */
+ if (!chng_link_state) {
+ if (resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
+ eee->eee_enabled = 1;
+ fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
+ eee->advertised =
+ _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+
+ if (resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
+ __le32 tmr;
+
+ eee->tx_lpi_enabled = 1;
+ tmr = resp->xcvr_identifier_type_tx_lpi_timer;
+ eee->tx_lpi_timer = le32_to_cpu(tmr) &
+ PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
+ }
+ }
+ }
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -4655,10 +4769,40 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
return 0;
}
+static void bnxt_get_port_module_status(struct bnxt *bp)
+{
+ struct bnxt_link_info *link_info = &bp->link_info;
+ struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
+ u8 module_status;
+
+ if (bnxt_update_link(bp, true))
+ return;
+
+ module_status = link_info->module_status;
+ switch (module_status) {
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
+ netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
+ bp->pf.port_id);
+ if (bp->hwrm_spec_code >= 0x10201) {
+ netdev_warn(bp->dev, "Module part number %s\n",
+ resp->phy_vendor_partnumber);
+ }
+ if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
+ netdev_warn(bp->dev, "TX is disabled\n");
+ if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
+ netdev_warn(bp->dev, "SFP+ module is shutdown\n");
+ }
+}
+
static void
bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+ if (bp->hwrm_spec_code >= 0x10201)
+ req->auto_pause =
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
@@ -4672,6 +4816,11 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
req->enables |=
cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+ if (bp->hwrm_spec_code >= 0x10201) {
+ req->auto_pause = req->force_pause;
+ req->enables |= cpu_to_le32(
+ PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+ }
}
}
@@ -4684,7 +4833,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
if (autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |=
- PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+ PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
req->enables |= cpu_to_le32(
PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
@@ -4698,9 +4847,6 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
}
- /* currently don't support half duplex */
- req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
- req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
/* tell chimp that the setting takes effect immediately */
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
}
@@ -4735,7 +4881,30 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
return rc;
}
-int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+static void bnxt_hwrm_set_eee(struct bnxt *bp,
+ struct hwrm_port_phy_cfg_input *req)
+{
+ struct ethtool_eee *eee = &bp->eee;
+
+ if (eee->eee_enabled) {
+ u16 eee_speeds;
+ u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
+
+ if (eee->tx_lpi_enabled)
+ flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
+ else
+ flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
+
+ req->flags |= cpu_to_le32(flags);
+ eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
+ req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
+ req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
+ } else {
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
+ }
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
{
struct hwrm_port_phy_cfg_input req = {0};
@@ -4744,14 +4913,57 @@ int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
bnxt_hwrm_set_pause_common(bp, &req);
bnxt_hwrm_set_link_common(bp, &req);
+
+ if (set_eee)
+ bnxt_hwrm_set_eee(bp, &req);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ if (pci_num_vf(bp->pdev))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static bool bnxt_eee_config_ok(struct bnxt *bp)
+{
+ struct ethtool_eee *eee = &bp->eee;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ return true;
+
+ if (eee->eee_enabled) {
+ u32 advertising =
+ _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ eee->eee_enabled = 0;
+ return false;
+ }
+ if (eee->advertised & ~advertising) {
+ eee->advertised = advertising & eee->supported;
+ return false;
+ }
+ }
+ return true;
+}
+
static int bnxt_update_phy_setting(struct bnxt *bp)
{
int rc;
bool update_link = false;
bool update_pause = false;
+ bool update_eee = false;
struct bnxt_link_info *link_info = &bp->link_info;
rc = bnxt_update_link(bp, true);
@@ -4761,7 +4973,8 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
return rc;
}
if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
- link_info->auto_pause_setting != link_info->req_flow_ctrl)
+ (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
+ link_info->req_flow_ctrl)
update_pause = true;
if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
link_info->force_pause_setting != link_info->req_flow_ctrl)
@@ -4780,8 +4993,11 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
update_link = true;
}
+ if (!bnxt_eee_config_ok(bp))
+ update_eee = true;
+
if (update_link)
- rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+ rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
else if (update_pause)
rc = bnxt_hwrm_set_pause(bp);
if (rc) {
@@ -4872,7 +5088,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* Enable TX queues */
bnxt_tx_enable(bp);
mod_timer(&bp->timer, jiffies + bp->current_interval);
- bnxt_update_link(bp, true);
+ /* Poll link status and check for SFP+ module status */
+ bnxt_get_port_module_status(bp);
return 0;
@@ -4972,6 +5189,7 @@ static int bnxt_close(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
bnxt_close_nic(bp, true, true);
+ bnxt_hwrm_shutdown_link(bp);
return 0;
}
@@ -5238,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
if (!bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
+
+ /* Both CTAG and STAG VLAN acceleration on the RX side have to be
+ * turned on or off together.
+ */
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ else
+ features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ }
+
return features;
}
@@ -5453,6 +5685,9 @@ static void bnxt_sp_task(struct work_struct *work)
rtnl_unlock();
}
+ if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
+ bnxt_get_port_module_status(bp);
+
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
bnxt_hwrm_port_qstats(bp);
@@ -5583,10 +5818,9 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
-#ifdef CONFIG_BNXT_SRIOV
- if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
- return -EADDRNOTAVAIL;
-#endif
+ rc = bnxt_approve_mac(bp, addr->sa_data);
+ if (rc)
+ return rc;
if (ether_addr_equal(addr->sa_data, dev->dev_addr))
return 0;
@@ -5921,6 +6155,13 @@ static int bnxt_probe_phy(struct bnxt *bp)
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
+ rc = bnxt_hwrm_phy_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
rc = bnxt_update_link(bp, false);
if (rc) {
netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -5930,15 +6171,24 @@ static int bnxt_probe_phy(struct bnxt *bp)
/* initialize the ethtool settings copy with NVM settings */
if (BNXT_AUTO_MODE(link_info->auto_mode)) {
- link_info->autoneg = BNXT_AUTONEG_SPEED |
- BNXT_AUTONEG_FLOW_CTRL;
+ link_info->autoneg = BNXT_AUTONEG_SPEED;
+ if (bp->hwrm_spec_code >= 0x10201) {
+ if (link_info->auto_pause_setting &
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ } else {
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ }
link_info->advertising = link_info->auto_link_speeds;
- link_info->req_flow_ctrl = link_info->auto_pause_setting;
} else {
link_info->req_link_speed = link_info->force_link_speed;
link_info->req_duplex = link_info->duplex_setting;
- link_info->req_flow_ctrl = link_info->force_pause_setting;
}
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ link_info->req_flow_ctrl =
+ link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
+ else
+ link_info->req_flow_ctrl = link_info->force_pause_setting;
return rc;
}
@@ -6013,6 +6263,22 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
return rc;
}
+static void bnxt_parse_log_pcie_link(struct bnxt *bp)
+{
+ enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+ enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+
+ if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+ netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
+ else
+ netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+ speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+ "Unknown", width);
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -6049,15 +6315,19 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
- NETIF_F_RXHASH |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
@@ -6128,6 +6398,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
board_info[ent->driver_data].name,
(long)pci_resource_start(pdev, 0), dev->dev_addr);
+ bnxt_parse_log_pcie_link(bp);
+
return 0;
init_err:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index de9d53eee..2824d65b2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,7 +11,7 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.0.0"
+#define DRV_MODULE_VERSION "1.2.0"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 0
@@ -425,10 +425,17 @@ struct rx_tpa_end_cmp_ext {
#define MAX_TPA 64
+#if (BNXT_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES 1
+#define MAX_RX_AGG_PAGES 4
+#define MAX_TX_PAGES 1
+#define MAX_CP_PAGES 8
+#else
#define MAX_RX_PAGES 8
#define MAX_RX_AGG_PAGES 32
#define MAX_TX_PAGES 8
#define MAX_CP_PAGES 64
+#endif
#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
@@ -774,6 +781,7 @@ struct bnxt_ntuple_filter {
};
struct bnxt_link_info {
+ u8 phy_type;
u8 media_type;
u8 transceiver;
u8 phy_addr;
@@ -803,7 +811,7 @@ struct bnxt_link_info {
#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
-#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
#define PHY_VER_LEN 3
u8 phy_ver[PHY_VER_LEN];
u16 link_speed;
@@ -828,9 +836,9 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
u16 lp_auto_link_speeds;
- u16 auto_link_speed;
u16 force_link_speed;
u32 preemphasis;
+ u8 module_status;
/* copy of requested setting from ethtool cmd */
u8 autoneg;
@@ -841,6 +849,7 @@ struct bnxt_link_info {
u16 req_link_speed;
u32 advertising;
bool force_link_chng;
+
/* a copy of phy_qcfg output used to report link
* info to VF
*/
@@ -890,6 +899,7 @@ struct bnxt {
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
+ #define BNXT_FLAG_EEE_CAP 0x1000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -955,6 +965,7 @@ struct bnxt {
u32 msg_enable;
+ u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
void *hwrm_cmd_resp_addr;
@@ -1006,6 +1017,7 @@ struct bnxt {
#define BNXT_RST_RING_SP_EVENT 7
#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
#define BNXT_PERIODIC_STATS_SP_EVENT 9
+#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
@@ -1026,6 +1038,9 @@ struct bnxt {
int ntp_fltr_count;
struct bnxt_link_info link_info;
+ struct ethtool_eee eee;
+ u32 lpi_tmr_lo;
+ u32 lpi_tmr_hi;
};
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1115,6 +1130,16 @@ static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
#endif
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+#define SFF_MODULE_ID_SFP 0x3
+#define SFF_MODULE_ID_QSFP 0xc
+#define SFF_MODULE_ID_QSFP_PLUS 0xd
+#define SFF_MODULE_ID_QSFP28 0x11
+#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+
void bnxt_set_ring_params(struct bnxt *);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
@@ -1123,7 +1148,7 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
-int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
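
The new BNXT_PAGE_SHIFT == 16 block in this header keeps ring memory bounded on 64 KiB-page architectures: a 64 KiB page holds 16 times as many descriptors as a 4 KiB one, so the per-ring page caps shrink accordingly. A worked example, assuming a 16-byte receive descriptor:

	/* 4 KiB pages:  RX_DESC_CNT = 4096  / 16 =  256 BDs/page; cap 8 pages -> 2048 BDs
	 * 64 KiB pages: RX_DESC_CNT = 65536 / 16 = 4096 BDs/page; cap 1 page  -> 4096 BDs
	 */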
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 2e472f6db..1b0ae4a72 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -327,7 +327,11 @@ static void bnxt_get_channels(struct net_device *dev,
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
channel->max_combined = max_rx_rings;
- bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false);
+ if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
+ max_rx_rings = 0;
+ max_tx_rings = 0;
+ }
+
tcs = netdev_get_num_tc(dev);
if (tcs > 1)
max_tx_rings /= tcs;
@@ -597,7 +601,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
kfree(pkglog);
}
-static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
u32 speed_mask = 0;
@@ -698,10 +702,23 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (link_info->phy_link_status == BNXT_LINK_LINK)
cmd->lp_advertising =
bnxt_fw_to_ethtool_lp_adv(link_info);
+ ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ if (!netif_carrier_ok(dev))
+ cmd->duplex = DUPLEX_UNKNOWN;
+ else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ cmd->duplex = DUPLEX_FULL;
+ else
+ cmd->duplex = DUPLEX_HALF;
} else {
cmd->autoneg = AUTONEG_DISABLE;
cmd->advertising = 0;
+ ethtool_speed =
+ bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
+ cmd->duplex = DUPLEX_HALF;
+ if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
+ cmd->duplex = DUPLEX_FULL;
}
+ ethtool_cmd_speed_set(cmd, ethtool_speed);
cmd->port = PORT_NONE;
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
@@ -719,16 +736,8 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->port = PORT_FIBRE;
}
- if (link_info->phy_link_status == BNXT_LINK_LINK) {
- if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
- cmd->duplex = DUPLEX_FULL;
- } else {
- cmd->duplex = DUPLEX_UNKNOWN;
- }
- ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
- ethtool_cmd_speed_set(cmd, ethtool_speed);
if (link_info->transceiver ==
- PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
+ PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL)
cmd->transceiver = XCVR_INTERNAL;
else
cmd->transceiver = XCVR_EXTERNAL;
@@ -739,31 +748,52 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u16 support_spds = link_info->support_speeds;
+ u32 fw_speed = 0;
+
switch (ethtool_speed) {
case SPEED_100:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ break;
case SPEED_1000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ break;
case SPEED_2500:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ break;
case SPEED_10000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ break;
case SPEED_20000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ break;
case SPEED_25000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ break;
case SPEED_40000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ break;
case SPEED_50000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ break;
default:
netdev_err(dev, "unsupported speed!\n");
break;
}
- return 0;
+ return fw_speed;
}
-static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
{
u16 fw_speed_mask = 0;
@@ -823,6 +853,16 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
*/
set_pause = true;
} else {
+ u16 fw_speed;
+ u8 phy_type = link_info->phy_type;
+
+ if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
+ phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
+ link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ netdev_err(dev, "10GBase-T devices must autoneg\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
/* TODO: currently don't support half duplex */
if (cmd->duplex == DUPLEX_HALF) {
netdev_err(dev, "HALF DUPLEX is not supported!\n");
@@ -833,14 +873,19 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->duplex == DUPLEX_UNKNOWN)
cmd->duplex = DUPLEX_FULL;
speed = ethtool_cmd_speed(cmd);
- link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+ fw_speed = bnxt_get_fw_speed(dev, speed);
+ if (!fw_speed) {
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ link_info->req_link_speed = fw_speed;
link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
link_info->autoneg = 0;
link_info->advertising = 0;
}
if (netif_running(dev))
- rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+ rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
set_setting_exit:
return rc;
@@ -874,7 +919,9 @@ static int bnxt_set_pauseparam(struct net_device *dev,
return -EINVAL;
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
+ if (bp->hwrm_spec_code >= 0x10201)
+ link_info->req_flow_ctrl =
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
} else {
/* when transition from auto pause to force pause,
* force a link change
@@ -882,17 +929,13 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
link_info->force_link_chng = true;
link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
- link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+ link_info->req_flow_ctrl = 0;
}
if (epause->rx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
- else
- link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
- else
- link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
if (netif_running(dev))
rc = bnxt_hwrm_set_pause(bp);
@@ -1381,6 +1424,199 @@ static int bnxt_set_eeprom(struct net_device *dev,
eeprom->len);
}
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct ethtool_eee *eee = &bp->eee;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u32 advertising =
+ _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ int rc = 0;
+
+ if (BNXT_VF(bp))
+ return 0;
+
+ if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ return -EOPNOTSUPP;
+
+ if (!edata->eee_enabled)
+ goto eee_ok;
+
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ netdev_warn(dev, "EEE requires autoneg\n");
+ return -EINVAL;
+ }
+ if (edata->tx_lpi_enabled) {
+ if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
+ edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
+ netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
+ bp->lpi_tmr_lo, bp->lpi_tmr_hi);
+ return -EINVAL;
+ } else if (!bp->lpi_tmr_hi) {
+ edata->tx_lpi_timer = eee->tx_lpi_timer;
+ }
+ }
+ if (!edata->advertised) {
+ edata->advertised = advertising & eee->supported;
+ } else if (edata->advertised & ~advertising) {
+ netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
+ edata->advertised, advertising);
+ return -EINVAL;
+ }
+
+ eee->advertised = edata->advertised;
+ eee->tx_lpi_enabled = edata->tx_lpi_enabled;
+ eee->tx_lpi_timer = edata->tx_lpi_timer;
+eee_ok:
+ eee->eee_enabled = edata->eee_enabled;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, false, true);
+
+ return rc;
+}
+
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ return -EOPNOTSUPP;
+
+ *edata = bp->eee;
+ if (!bp->eee.eee_enabled) {
+ /* Preserve tx_lpi_timer so that the last value will be used
+ * by default when it is re-enabled.
+ */
+ edata->advertised = 0;
+ edata->tx_lpi_enabled = 0;
+ }
+
+ if (!bp->eee.eee_active)
+ edata->lp_advertised = 0;
+
+ return 0;
+}
+
+static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
+ u16 page_number, u16 start_addr,
+ u16 data_length, u8 *buf)
+{
+ struct hwrm_port_phy_i2c_read_input req = {0};
+ struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+ int rc, byte_offset = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+ req.i2c_slave_addr = i2c_addr;
+ req.page_number = cpu_to_le16(page_number);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ do {
+ u16 xfer_size;
+
+ xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
+ data_length -= xfer_size;
+ req.page_offset = cpu_to_le16(start_addr + byte_offset);
+ req.data_length = xfer_size;
+ req.enables = cpu_to_le32(start_addr + byte_offset ?
+ PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (!rc)
+ memcpy(buf + byte_offset, output->data, xfer_size);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ byte_offset += xfer_size;
+ } while (!rc && data_length > 0);
+
+ return rc;
+}
+
+static int bnxt_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_port_phy_i2c_read_input req = {0};
+ struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* No point in going further if phy status indicates
+ * module is not inserted or if it is powered down or
+ * if it is of type 10GBase-T
+ */
+ if (bp->link_info.module_status >
+ PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
+ return -EOPNOTSUPP;
+
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code < 0x10202)
+ return -EOPNOTSUPP;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+ req.i2c_slave_addr = I2C_DEV_ADDR_A0;
+ req.page_number = 0;
+ req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
+ req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ u32 module_id = le32_to_cpu(output->data[0]);
+
+ switch (module_id) {
+ case SFF_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case SFF_MODULE_ID_QSFP:
+ case SFF_MODULE_ID_QSFP_PLUS:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case SFF_MODULE_ID_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u16 start = eeprom->offset, length = eeprom->len;
+ int rc = 0;
+
+ memset(data, 0, eeprom->len);
+
+ /* Read A0 portion of the EEPROM */
+ if (start < ETH_MODULE_SFF_8436_LEN) {
+ if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
+ length = ETH_MODULE_SFF_8436_LEN - start;
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
+ start, length, data);
+ if (rc)
+ return rc;
+ start += length;
+ data += length;
+ length = eeprom->len - length;
+ }
+
+ /* Read A2 portion of the EEPROM */
+ if (length) {
+ start -= ETH_MODULE_SFF_8436_LEN;
+ bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
+ length, data);
+ }
+ return rc;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_settings = bnxt_get_settings,
.set_settings = bnxt_set_settings,
@@ -1409,4 +1645,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eeprom = bnxt_get_eeprom,
.set_eeprom = bnxt_set_eeprom,
.get_link = bnxt_get_link,
+ .get_eee = bnxt_get_eee,
+ .set_eee = bnxt_set_eee,
+ .get_module_info = bnxt_get_module_info,
+ .get_module_eeprom = bnxt_get_module_eeprom,
};
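
bnxt_get_module_eeprom() above splits a request at the 256-byte boundary defined by SFF-8472: the first ETH_MODULE_SFF_8436_LEN (256) bytes live at I2C address 0xa0, everything beyond that at 0xa2, page 1. A worked example of the split arithmetic:

	u16 start = 200, length = 100;			/* caller's request */
	u16 a0_len = ETH_MODULE_SFF_8436_LEN - start;	/* 56 bytes from 0xa0 */
	u16 a2_len = length - a0_len;			/* 44 bytes from 0xa2, page 1 */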
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 98fa81e08..3abc03b60 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,6 +12,8 @@
extern const struct ethtool_ops bnxt_ethtool_ops;
+u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
+u16 bnxt_get_fw_auto_link_speeds(u32);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index e0aac65c6..461675caa 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 4badbedcb..05e3c49a7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -104,6 +104,7 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
@@ -111,6 +112,7 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
__le32 event_data2;
u8 opaque_v;
@@ -141,6 +143,7 @@ struct hwrm_async_event_cmpl_link_status_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
@@ -195,6 +198,9 @@ struct hwrm_async_event_cmpl_link_speed_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
};
@@ -237,6 +243,55 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed {
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
};
/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
@@ -363,6 +418,47 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change {
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
};
+/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
+struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+};
+
/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
@@ -377,6 +473,7 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
@@ -387,12 +484,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
-/* HW Resource Manager Specification 1.0.0 */
+/* HW Resource Manager Specification 1.2.2 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 0
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_MINOR 2
+#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_STR "1.0.0"
+#define HWRM_VERSION_STR "1.2.2"
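
Downstream the driver compares this spec version as one packed integer, which is why bnxt_approve_mac() further down can test bp->hwrm_spec_code < 0x10202 for "older than 1.2.2". A sketch of the encoding (the helper is illustrative; the field name matches the driver):

/* 1.2.2 -> 0x010202, assembled once from the HWRM_VER_GET response. */
static u32 hwrm_spec_code(u8 maj, u8 min, u8 upd)
{
	return (u32)maj << 16 | (u32)min << 8 | upd;
}
/* hwrm_spec_code(1, 2, 2) == 0x10202 */
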
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
@@ -444,7 +541,7 @@ struct cmd_nums {
#define HWRM_FUNC_BUF_RGTR (0x1fUL)
#define HWRM_PORT_PHY_CFG (0x20UL)
#define HWRM_PORT_MAC_CFG (0x21UL)
- #define RESERVED2 (0x22UL)
+ #define HWRM_PORT_TS_QUERY (0x22UL)
#define HWRM_PORT_QSTATS (0x23UL)
#define HWRM_PORT_LPBK_QSTATS (0x24UL)
#define HWRM_PORT_CLR_STATS (0x25UL)
@@ -452,6 +549,9 @@ struct cmd_nums {
#define HWRM_PORT_PHY_QCFG (0x27UL)
#define HWRM_PORT_MAC_QCFG (0x28UL)
#define HWRM_PORT_BLINK_LED (0x29UL)
+ #define HWRM_PORT_PHY_QCAPS (0x2aUL)
+ #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
+ #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
#define HWRM_QUEUE_QPORTCFG (0x30UL)
#define HWRM_QUEUE_QCFG (0x31UL)
#define HWRM_QUEUE_CFG (0x32UL)
@@ -531,6 +631,7 @@ struct cmd_nums {
__le16 unused_0[3];
};
+/* Return Codes (8 bytes) */
struct ret_codes {
__le16 error_code;
#define HWRM_ERR_CODE_SUCCESS (0x0UL)
@@ -875,10 +976,11 @@ struct hwrm_func_vf_cfg_input {
#define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
#define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
__le16 mtu;
__le16 guest_vlan;
__le16 async_event_cr;
- __le16 unused_0[3];
+ u8 dflt_mac_addr[6];
};
/* Output (16 bytes) */
@@ -917,7 +1019,8 @@ struct hwrm_func_qcaps_output {
__le32 flags;
#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- u8 perm_mac_address[6];
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
__le16 max_tx_rings;
@@ -942,6 +1045,67 @@ struct hwrm_func_qcaps_output {
u8 valid;
};
+/* hwrm_func_qcfg */
+/* Input (24 bytes) */
+struct hwrm_func_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (72 bytes) */
+struct hwrm_func_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ u8 unused_0;
+ u8 unused_1;
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF (0x0UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS (0x1UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 (0x2UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
+ u8 unused_2;
+ __le16 dflt_vnic_id;
+ u8 unused_3;
+ u8 unused_4;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
+ u8 unused_5;
+ __le16 unused_6;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ u8 unused_7;
+ u8 unused_8;
+ u8 unused_9;
+ u8 valid;
+};
+
/* hwrm_func_cfg */
/* Input (88 bytes) */
struct hwrm_func_cfg_input {
@@ -1171,6 +1335,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS (0x12UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0)
@@ -1302,6 +1467,7 @@ struct hwrm_func_drv_qver_output {
#define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS (0x12UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0)
@@ -1317,7 +1483,7 @@ struct hwrm_func_drv_qver_output {
};
/* hwrm_port_phy_cfg */
-/* Input (48 bytes) */
+/* Input (56 bytes) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1329,6 +1495,10 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL
#define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
@@ -1339,6 +1509,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
@@ -1350,12 +1522,14 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
u8 auto_mode;
#define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK (0x4UL << 0)
u8 auto_duplex;
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
@@ -1363,6 +1537,7 @@ struct hwrm_port_phy_cfg_input {
u8 auto_pause;
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
u8 unused_0;
__le16 auto_link_speed;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
@@ -1374,6 +1549,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
__le16 auto_link_speed_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1386,6 +1563,9 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
#define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
@@ -1398,7 +1578,20 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
u8 unused_1;
__le32 preemphasis;
- __le32 unused_2;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ u8 unused_2;
+ u8 unused_3;
+ __le32 tx_lpi_timer;
+ __le32 unused_4;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
};
/* Output (16 bytes) */
@@ -1426,7 +1619,7 @@ struct hwrm_port_phy_qcfg_input {
__le16 unused_0[3];
};
-/* Output (48 bytes) */
+/* Output (96 bytes) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1447,6 +1640,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB (0xffffUL << 0)
u8 duplex;
#define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
@@ -1465,6 +1660,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
@@ -1475,15 +1673,18 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
u8 auto_mode;
#define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK (0x4UL << 0)
u8 auto_pause;
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
__le16 auto_link_speed;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
@@ -1494,6 +1695,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
__le16 auto_link_speed_mask;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1506,6 +1709,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
@@ -1516,31 +1722,49 @@ struct hwrm_port_phy_qcfg_output {
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 reserved1;
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE (0xffUL << 0)
__le32 preemphasis;
u8 phy_maj;
u8 phy_min;
u8 phy_bld;
u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR (0x4UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX (0x6UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE (0x9UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY (0xaUL << 0)
u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
- u8 transceiver_type;
- #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
- u8 phy_addr;
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ u8 eee_config_phy_addr;
#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- u8 unused_2;
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL
+ #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1
__le16 link_partner_adv_speeds;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
@@ -1553,15 +1777,48 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
u8 link_partner_adv_auto_mode;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0)
u8 link_partner_adv_pause;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
+ __le32 unused_1;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le32 unused_2;
u8 unused_3;
u8 unused_4;
u8 unused_5;
@@ -1569,7 +1826,7 @@ struct hwrm_port_phy_qcfg_output {
};
/* hwrm_port_mac_cfg */
-/* Input (32 bytes) */
+/* Input (40 bytes) */
struct hwrm_port_mac_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1581,6 +1838,10 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -1588,6 +1849,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
__le16 port_id;
u8 ipg;
u8 lpbk;
@@ -1598,6 +1861,9 @@ struct hwrm_port_mac_cfg_input {
u8 lcos_map_pri;
u8 tunnel_pri2cos_map_pri;
u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ __le32 unused_0;
};
/* Output (16 bytes) */
@@ -1754,7 +2020,113 @@ struct hwrm_port_blink_led_output {
u8 valid;
};
-/* hwrm_queue_qportcfg */
+/* hwrm_port_phy_qcaps */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (24 bytes) */
+struct hwrm_port_phy_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 eee_supported;
+ #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL
+ #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1
+ u8 unused_0;
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+};
+
+/* hwrm_port_phy_i2c_read */
+/* Input (40 bytes) */
+struct hwrm_port_phy_i2c_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 unused_0;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* Output (80 bytes) */
+struct hwrm_port_phy_i2c_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
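
The response above carries at most 64 bytes of payload (data[16] little-endian words), so EEPROM ranges larger than that have to be fetched as a sequence of requests. A hedged sketch of the chunking loop such a reader would use (issue_i2c_read() is a hypothetical wrapper around the request/response pair above):

#define I2C_READ_MAX	64	/* sizeof(data) in the output above */

static int read_eeprom_range(u16 start, u16 total, u8 *buf,
			     int (*issue_i2c_read)(u16 off, u8 len, u8 *buf))
{
	while (total) {
		u8 len = total > I2C_READ_MAX ? I2C_READ_MAX : total;
		int rc = issue_i2c_read(start, len, buf);

		if (rc)
			return rc;
		start += len;
		buf += len;
		total -= len;
	}
	return 0;
}
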
/* Input (24 bytes) */
struct hwrm_queue_qportcfg_input {
__le16 req_type;
@@ -1766,6 +2138,7 @@ struct hwrm_queue_qportcfg_input {
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
__le16 port_id;
__le16 unused_0;
};
@@ -1838,6 +2211,7 @@ struct hwrm_queue_cfg_input {
#define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_RX
__le32 enables;
#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
@@ -1875,6 +2249,7 @@ struct hwrm_queue_buffers_cfg_input {
#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX
__le32 enables;
#define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
#define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
@@ -1952,6 +2327,7 @@ struct hwrm_queue_pri2cos_cfg_input {
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
__le32 enables;
u8 port_id;
@@ -2158,6 +2534,8 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -2622,6 +3000,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
@@ -2747,6 +3126,7 @@ struct hwrm_cfa_l2_filter_cfg_input {
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
@@ -3337,6 +3717,41 @@ struct hwrm_fw_reset_output {
u8 valid;
};
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
/* hwrm_exec_fwd_resp */
/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 43ef392c8..40a7b0e09 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0c5f51049..363884dd9 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -771,12 +771,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
phy_qcfg_resp.link =
PORT_PHY_QCFG_RESP_LINK_LINK;
- if (phy_qcfg_resp.auto_link_speed)
- phy_qcfg_resp.link_speed =
- phy_qcfg_resp.auto_link_speed;
- else
- phy_qcfg_resp.link_speed =
- phy_qcfg_resp.force_link_speed;
+ phy_qcfg_resp.link_speed = cpu_to_le16(
+ PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
phy_qcfg_resp.duplex =
PORT_PHY_QCFG_RESP_DUPLEX_FULL;
phy_qcfg_resp.pause =
@@ -859,8 +855,8 @@ void bnxt_update_vf_mac(struct bnxt *bp)
* default but the stored zero MAC will allow the VF user to change
* the random MAC address using ndo_set_mac_address() if he wants.
*/
- if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
- memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+ if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
+ memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
/* overwrite netdev dev_addr with admin VF MAC */
if (is_valid_ether_addr(bp->vf.mac_addr))
@@ -869,6 +865,31 @@ update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
}
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ int rc = 0;
+
+ if (!BNXT_VF(bp))
+ return 0;
+
+ if (bp->hwrm_spec_code < 0x10202) {
+ if (is_valid_ether_addr(bp->vf.mac_addr))
+ rc = -EADDRNOTAVAIL;
+ goto mac_done;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+mac_done:
+ if (rc) {
+ rc = -EADDRNOTAVAIL;
+ netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
+ mac);
+ }
+ return rc;
+}
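
The effect is that a VF must get the PF's blessing before committing a new MAC (via HWRM_FUNC_VF_CFG with the new DFLT_MAC_ADDR enable bit); firmware older than spec 1.2.2 has no such call, so a VF that already carries an administratively assigned MAC simply refuses the change. A condensed, illustrative sketch of the typical ndo_set_mac_address() caller (the body is a sketch, not the driver's verbatim code):

static int change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	rc = bnxt_approve_mac(bp, addr->sa_data);	/* no-op on a PF */
	if (rc)
		return rc;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	return 0;
}
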
#else
void bnxt_sriov_disable(struct bnxt *bp)
@@ -883,4 +904,9 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
void bnxt_update_vf_mac(struct bnxt *bp)
{
}
+
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index c151280e3..0392670ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,4 +20,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
+int bnxt_approve_mac(struct bnxt *, u8 *);
#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index b69dc58fa..b1d2ac818 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5350,7 +5350,10 @@ static int cnic_start_hw(struct cnic_dev *dev)
return 0;
err1:
- cp->free_resc(dev);
+ if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
+ cp->stop_hw(dev);
+ else
+ cp->free_resc(dev);
pci_dev_put(dev->pcidev);
return err;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 44ad1490b..541456398 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -104,8 +104,8 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
static inline void dmadesc_set(struct bcmgenet_priv *priv,
void __iomem *d, dma_addr_t addr, u32 val)
{
- dmadesc_set_length_status(priv, d, val);
dmadesc_set_addr(priv, d, addr);
+ dmadesc_set_length_status(priv, d, val);
}
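
The swap looks cosmetic but most likely is not: writing the length/status word is what publishes the descriptor to the DMA engine, so the buffer address has to be in place before that write lands. The general publish pattern, sketched with an illustrative descriptor layout (the ownership semantics here are generic, not GENET-specific):

struct dma_desc_sketch {
	__le32 addr;
	__le32 length_status;
};

static void desc_publish(struct dma_desc_sketch *desc, u32 dma_addr,
			 u32 length_status)
{
	desc->addr = cpu_to_le32(dma_addr);	/* what the NIC will fetch */
	wmb();					/* address visible first */
	desc->length_status = cpu_to_le32(length_status);	/* hand over */
}
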
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
@@ -1225,8 +1225,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
dev->stats.tx_packets += pkts_compl;
dev->stats.tx_bytes += bytes_compl;
+ txq = netdev_get_tx_queue(dev, ring->queue);
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- txq = netdev_get_tx_queue(dev, ring->queue);
if (netif_tx_queue_stopped(txq))
netif_tx_wake_queue(txq);
}
@@ -1335,6 +1337,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct enet_cb *tx_cb_ptr;
+ unsigned int frag_size;
dma_addr_t mapping;
int ret;
@@ -1342,10 +1345,12 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
if (unlikely(!tx_cb_ptr))
BUG();
+
tx_cb_ptr->skb = NULL;
- mapping = skb_frag_dma_map(kdev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
+ frag_size = skb_frag_size(frag);
+
+ mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
priv->mib.tx_dma_failed++;
@@ -1355,10 +1360,10 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
}
dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
- (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
return 0;
@@ -1451,15 +1456,19 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
else
index -= 1;
- nr_frags = skb_shinfo(skb)->nr_frags;
ring = &priv->tx_rings[index];
txq = netdev_get_tx_queue(dev, ring->queue);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
spin_lock_irqsave(&ring->lock, flags);
- if (ring->free_bds <= nr_frags + 1) {
- netif_tx_stop_queue(txq);
- netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
- __func__, index, ring->queue);
+ if (ring->free_bds <= (nr_frags + 1)) {
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev,
+ "%s: tx ring %d full when queue %d awake\n",
+ __func__, index, ring->queue);
+ }
ret = NETDEV_TX_BUSY;
goto out;
}
@@ -1513,6 +1522,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
ring->prod_index += nr_frags + 1;
ring->prod_index &= DMA_P_INDEX_MASK;
+ netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
+
if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
@@ -1732,7 +1743,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
work_done = bcmgenet_desc_rx(ring, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
ring->int_enable(ring);
}
@@ -2361,6 +2372,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
+ struct netdev_queue *txq;
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
@@ -2375,6 +2387,14 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
}
}
+ for (i = 0; i < priv->hw_params->tx_queues; i++) {
+ txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+ netdev_tx_reset_queue(txq);
+ }
+
+ txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
+ netdev_tx_reset_queue(txq);
+
bcmgenet_free_rx_buffers(priv);
kfree(priv->rx_cbs);
kfree(priv->tx_cbs);
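
Taken together, the TX hunks in this file wire bcmgenet up to Byte Queue Limits: bytes are reported to the stack when queued (netdev_tx_sent_queue), credited back on reclaim (netdev_tx_completed_queue), and zeroed whenever the rings are torn down (netdev_tx_reset_queue) so a stale byte count cannot wedge the queue after a reopen. The canonical lifecycle, shown schematically in one place:

/* BQL lifecycle for one TX queue; the three calls are the kernel API,
 * the single function is only a schematic.
 */
static void bql_lifecycle(struct net_device *dev, unsigned int q,
			  unsigned int pkts, unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, q);

	netdev_tx_sent_queue(txq, bytes);	/* at enqueue time */
	/* ... hardware transmits, completion interrupt fires ... */
	netdev_tx_completed_queue(txq, pkts, bytes); /* at TX reclaim */
	netdev_tx_reset_queue(txq);		/* at ring teardown only */
}
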
@@ -2490,7 +2510,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
if (likely(napi_schedule_prep(&rx_ring->napi))) {
rx_ring->int_disable(rx_ring);
- __napi_schedule(&rx_ring->napi);
+ __napi_schedule_irqoff(&rx_ring->napi);
}
}
@@ -2503,7 +2523,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
if (likely(napi_schedule_prep(&tx_ring->napi))) {
tx_ring->int_disable(tx_ring);
- __napi_schedule(&tx_ring->napi);
+ __napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -2533,7 +2553,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
if (likely(napi_schedule_prep(&rx_ring->napi))) {
rx_ring->int_disable(rx_ring);
- __napi_schedule(&rx_ring->napi);
+ __napi_schedule_irqoff(&rx_ring->napi);
}
}
@@ -2542,7 +2562,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
if (likely(napi_schedule_prep(&tx_ring->napi))) {
tx_ring->int_disable(tx_ring);
- __napi_schedule(&tx_ring->napi);
+ __napi_schedule_irqoff(&tx_ring->napi);
}
}
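
All four call sites run in hard-IRQ context, where local interrupts are already disabled, so the _irqoff variant skips a redundant local_irq_save()/restore pair around the softirq raise. The pattern in isolation (device-interrupt masking omitted):

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct napi_struct *napi = dev_id;

	/* Safe only because the handler runs with IRQs off. */
	if (likely(napi_schedule_prep(napi)))
		__napi_schedule_irqoff(napi);
	return IRQ_HANDLED;
}
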
@@ -3039,7 +3059,7 @@ static void bcmgenet_timeout(struct net_device *dev)
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_errors++;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index eacc55967..f1b81187a 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2462,7 +2462,7 @@ static void sbmac_tx_timeout (struct net_device *dev)
spin_lock_irqsave(&sc->sbm_lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
spin_unlock_irqrestore(&sc->sbm_lock, flags);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a91a31959..6ad2b4a51 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7374,7 +7374,7 @@ static void tg3_napi_fini(struct tg3 *tp)
static inline void tg3_netif_stop(struct tg3 *tp)
{
- tp->dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(tp->dev); /* prevent tx timeout */
tg3_napi_disable(tp);
netif_carrier_off(tp->dev);
netif_tx_disable(tp->dev);
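
The dev->trans_start field was removed from struct net_device around this release (the timestamp moved into the per-queue structure), so drivers that used to poke it directly now go through the helper. In this kernel generation the helper is roughly:

/* Approximation of the 4.7-era include/linux/netdevice.h helper. */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}
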
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a63551d0a..cb07d95e3 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -61,8 +61,7 @@
#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define MACB_WOL_ENABLED (0x1 << 1)
-/*
- * Graceful stop timeouts in us. We should allow up to
+/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
#define MACB_HALT_TIMEOUT 1230
@@ -130,9 +129,8 @@ static void hw_writel(struct macb *bp, int offset, u32 value)
writel_relaxed(value, bp->regs + offset);
}
-/*
- * Find the CPU endianness by using the loopback bit of NCR register. When the
- * CPU is in big endian we need to program swaped mode for management
+/* Find the CPU endianness by using the loopback bit of NCR register. When the
+ * CPU is in big endian we need to program swapped mode for management
* descriptor access.
*/
static bool hw_is_native_io(void __iomem *addr)
@@ -189,7 +187,7 @@ static void macb_get_hwaddr(struct macb *bp)
pdata = dev_get_platdata(&bp->pdev->dev);
- /* Check all 4 address register for vaild address */
+ /* Check all 4 address register for valid address */
for (i = 0; i < 4; i++) {
bottom = macb_or_gem_readl(bp, SA1B + i * 8);
top = macb_or_gem_readl(bp, SA1T + i * 8);
@@ -297,7 +295,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
ferr = DIV_ROUND_UP(ferr, rate / 100000);
if (ferr > 5)
netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
- rate);
+ rate);
if (clk_set_rate(clk, rate_rounded))
netdev_err(dev, "adjusting tx_clk failed.\n");
@@ -386,7 +384,8 @@ static int macb_mii_probe(struct net_device *dev)
pdata = dev_get_platdata(&bp->pdev->dev);
if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
- ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+ ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
+ "phy int");
if (!ret) {
phy_irq = gpio_to_irq(pdata->phy_irq_pin);
phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
@@ -430,7 +429,7 @@ static int macb_mii_init(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(MPE));
bp->mii_bus = mdiobus_alloc();
- if (bp->mii_bus == NULL) {
+ if (!bp->mii_bus) {
err = -ENOMEM;
goto err_out;
}
@@ -439,7 +438,7 @@ static int macb_mii_init(struct macb *bp)
bp->mii_bus->read = &macb_mdio_read;
bp->mii_bus->write = &macb_mdio_write;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- bp->pdev->name, bp->pdev->id);
+ bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->pdev->dev;
pdata = dev_get_platdata(&bp->pdev->dev);
@@ -452,7 +451,8 @@ static int macb_mii_init(struct macb *bp)
err = of_mdiobus_register(bp->mii_bus, np);
/* fallback to standard phy registration if no phy were
- found during dt phy registration */
+ * found during dt phy registration
+ */
if (!err && !phy_find_first(bp->mii_bus)) {
for (i = 0; i < PHY_MAX_ADDR; i++) {
struct phy_device *phydev;
@@ -500,7 +500,7 @@ static void macb_update_stats(struct macb *bp)
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
- for(; p < end; p++, offset += 4)
+ for (; p < end; p++, offset += 4)
*p += bp->macb_reg_readl(bp, offset);
}
@@ -568,8 +568,7 @@ static void macb_tx_error_task(struct work_struct *work)
/* Make sure nobody is trying to queue up new packets */
netif_tx_stop_all_queues(bp->dev);
- /*
- * Stop transmission now
+ /* Stop transmission now
* (in case we have just queued new packets)
* macb/gem must be halted to write TBQP register
*/
@@ -577,8 +576,7 @@ static void macb_tx_error_task(struct work_struct *work)
/* Just complain for now, reinitializing TX path can be good */
netdev_err(bp->dev, "BUG: halt tx timed out\n");
- /*
- * Treat frames in TX queue including the ones that caused the error.
+ /* Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
*/
for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
@@ -608,10 +606,9 @@ static void macb_tx_error_task(struct work_struct *work)
bp->stats.tx_bytes += skb->len;
}
} else {
- /*
- * "Buffers exhausted mid-frame" errors may only happen
- * if the driver is buggy, so complain loudly about those.
- * Statistics are updated by hardware.
+ /* "Buffers exhausted mid-frame" errors may only happen
+ * if the driver is buggy, so complain loudly about
+ * those. Statistics are updated by hardware.
*/
if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
netdev_err(bp->dev,
@@ -663,7 +660,7 @@ static void macb_tx_interrupt(struct macb_queue *queue)
queue_writel(queue, ISR, MACB_BIT(TCOMP));
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
- (unsigned long)status);
+ (unsigned long)status);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head; tail++) {
@@ -723,7 +720,8 @@ static void gem_rx_refill(struct macb *bp)
struct sk_buff *skb;
dma_addr_t paddr;
- while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+ while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
+ RX_RING_SIZE) > 0) {
entry = macb_rx_ring_wrap(bp->rx_prepared_head);
/* Make hw descriptor updates visible to CPU */
@@ -731,10 +729,10 @@ static void gem_rx_refill(struct macb *bp)
bp->rx_prepared_head++;
- if (bp->rx_skbuff[entry] == NULL) {
+ if (!bp->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
- if (unlikely(skb == NULL)) {
+ if (unlikely(!skb)) {
netdev_err(bp->dev,
"Unable to allocate sk_buff\n");
break;
@@ -742,7 +740,8 @@ static void gem_rx_refill(struct macb *bp)
/* now fill corresponding descriptor entry */
paddr = dma_map_single(&bp->pdev->dev, skb->data,
- bp->rx_buffer_size, DMA_FROM_DEVICE);
+ bp->rx_buffer_size,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, paddr)) {
dev_kfree_skb(skb);
break;
@@ -767,7 +766,7 @@ static void gem_rx_refill(struct macb *bp)
wmb();
netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
- bp->rx_prepared_head, bp->rx_tail);
+ bp->rx_prepared_head, bp->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
@@ -778,14 +777,14 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
for (frag = begin; frag != end; frag++) {
struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+
desc->addr &= ~MACB_BIT(RX_USED);
}
/* Make descriptor updates visible to hardware */
wmb();
- /*
- * When this happens, the hardware stats registers for
+ /* When this happens, the hardware stats registers for
* whatever caused this is updated, so we don't have to record
* anything.
*/
@@ -881,11 +880,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
- macb_rx_ring_wrap(first_frag),
- macb_rx_ring_wrap(last_frag), len);
+ macb_rx_ring_wrap(first_frag),
+ macb_rx_ring_wrap(last_frag), len);
- /*
- * The ethernet header starts NET_IP_ALIGN bytes into the
+ /* The ethernet header starts NET_IP_ALIGN bytes into the
* first buffer. Since the header is 14 bytes, this makes the
* payload word-aligned.
*
@@ -925,7 +923,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
- macb_rx_buffer(bp, frag), frag_len);
+ macb_rx_buffer(bp, frag),
+ frag_len);
offset += bp->rx_buffer_size;
desc = macb_rx_desc(bp, frag);
desc->addr &= ~MACB_BIT(RX_USED);
@@ -943,7 +942,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
bp->stats.rx_packets++;
bp->stats.rx_bytes += skb->len;
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
- skb->len, skb->csum);
+ skb->len, skb->csum);
netif_receive_skb(skb);
return 0;
@@ -1050,7 +1049,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = 0;
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
- (unsigned long)status, budget);
+ (unsigned long)status, budget);
work_done = bp->macbgem_ops.mog_rx(bp, budget);
if (work_done < budget) {
@@ -1100,8 +1099,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
(unsigned long)status);
if (status & MACB_RX_INT_FLAGS) {
- /*
- * There's no point taking any more interrupts
+ /* There's no point taking any more interrupts
* until we have processed the buffers. The
* scheduling call may fail if the poll routine
* is already scheduled, so disable interrupts
@@ -1130,8 +1128,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(TCOMP))
macb_tx_interrupt(queue);
- /*
- * Link change detection isn't possible with RMII, so we'll
+ /* Link change detection isn't possible with RMII, so we'll
* add that if/when we get our hands on a full-blown MII PHY.
*/
@@ -1162,8 +1159,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
- /*
- * TODO: Reset the hardware, and maybe move the
+ /* TODO: Reset the hardware, and maybe move the
* netdev_err to a lower-priority context as well
* (work queue?)
*/
@@ -1182,8 +1178,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling receive - used by netconsole and other diagnostic tools
+/* Polling receive - used by netconsole and other diagnostic tools
* to allow network i/o with interrupts disabled.
*/
static void macb_poll_controller(struct net_device *dev)
@@ -1269,7 +1264,7 @@ static unsigned int macb_tx_map(struct macb *bp,
}
/* Should never happen */
- if (unlikely(tx_skb == NULL)) {
+ if (unlikely(!tx_skb)) {
netdev_err(bp->dev, "BUG! empty skb!\n");
return 0;
}
@@ -1339,16 +1334,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev,
- "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
- queue_index, skb->len, skb->head, skb->data,
- skb_tail_pointer(skb), skb_end_pointer(skb));
+ "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
+ queue_index, skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, 16, true);
#endif
/* Count how many TX buffer descriptors are needed to send this
* socket buffer: skb fragments of jumbo frames may need to be
- * splitted into many buffer descriptors.
+ * split into many buffer descriptors.
*/
count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
nr_frags = skb_shinfo(skb)->nr_frags;
@@ -1399,8 +1394,8 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
netdev_dbg(bp->dev,
- "RX buffer must be multiple of %d bytes, expanding\n",
- RX_BUFFER_MULTIPLE);
+ "RX buffer must be multiple of %d bytes, expanding\n",
+ RX_BUFFER_MULTIPLE);
bp->rx_buffer_size =
roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
}
@@ -1423,7 +1418,7 @@ static void gem_free_rx_buffers(struct macb *bp)
for (i = 0; i < RX_RING_SIZE; i++) {
skb = bp->rx_skbuff[i];
- if (skb == NULL)
+ if (!skb)
continue;
desc = &bp->rx_ring[i];
@@ -1479,10 +1474,10 @@ static int gem_alloc_rx_buffers(struct macb *bp)
bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
if (!bp->rx_skbuff)
return -ENOMEM;
- else
- netdev_dbg(bp->dev,
- "Allocated %d RX struct sk_buff entries at %p\n",
- RX_RING_SIZE, bp->rx_skbuff);
+
+ netdev_dbg(bp->dev,
+ "Allocated %d RX struct sk_buff entries at %p\n",
+ RX_RING_SIZE, bp->rx_skbuff);
return 0;
}
@@ -1495,10 +1490,10 @@ static int macb_alloc_rx_buffers(struct macb *bp)
&bp->rx_buffers_dma, GFP_KERNEL);
if (!bp->rx_buffers)
return -ENOMEM;
- else
- netdev_dbg(bp->dev,
- "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+
+ netdev_dbg(bp->dev,
+ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
return 0;
}
@@ -1589,8 +1584,7 @@ static void macb_reset_hw(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
- /*
- * Disable RX and TX (XXX: Should we halt the transmission
+ /* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
*/
macb_writel(bp, NCR, 0);
@@ -1653,8 +1647,7 @@ static u32 macb_mdc_clk_div(struct macb *bp)
return config;
}
-/*
- * Get the DMA bus width field of the network configuration register that we
+/* Get the DMA bus width field of the network configuration register that we
* should program. We find the width from decoding the design configuration
* register to find the maximum supported data bus width.
*/
@@ -1674,8 +1667,7 @@ static u32 macb_dbw(struct macb *bp)
}
}
-/*
- * Configure the receive DMA engine
+/* Configure the receive DMA engine
* - use the correct receive buffer size
* - set best burst length for DMA operations
 * (if not supported by FIFO, it will fall back to default)
@@ -1763,8 +1755,7 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}
-/*
- * The hash address register is 64 bits long and takes up two
+/* The hash address register is 64 bits long and takes up two
* locations in the memory map. The least significant bits are stored
* in EMAC_HSL and the most significant bits in EMAC_HSH.
*
@@ -1804,9 +1795,7 @@ static inline int hash_bit_value(int bitnr, __u8 *addr)
return 0;
}
-/*
- * Return the hash index value for the specified address.
- */
+/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
int i, j, bitval;
@@ -1822,9 +1811,7 @@ static int hash_get_index(__u8 *addr)
return hash_index;
}
-/*
- * Add multicast addresses to the internal multicast-hash table.
- */
+/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -1832,7 +1819,8 @@ static void macb_sethashtable(struct net_device *dev)
unsigned int bitnr;
struct macb *bp = netdev_priv(dev);
- mc_filter[0] = mc_filter[1] = 0;
+ mc_filter[0] = 0;
+ mc_filter[1] = 0;
netdev_for_each_mc_addr(ha, dev) {
bitnr = hash_get_index(ha->addr);
@@ -1843,9 +1831,7 @@ static void macb_sethashtable(struct net_device *dev)
macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
-/*
- * Enable/Disable promiscuous and multicast modes.
- */
+/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
unsigned long cfg;
@@ -2162,9 +2148,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
regs_buff[12] = macb_or_gem_readl(bp, USRIO);
- if (macb_is_gem(bp)) {
+ if (macb_is_gem(bp))
regs_buff[13] = gem_readl(bp, DMACFG);
- }
}
static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2287,11 +2272,11 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_set_features = macb_set_features,
};
-/*
- * Configure peripheral capabilities according to device tree
+/* Configure peripheral capabilities according to device tree
* and integration options used
*/
-static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
+static void macb_configure_caps(struct macb *bp,
+ const struct macb_config *dt_conf)
{
u32 dcfg;
@@ -2989,7 +2974,7 @@ static int macb_probe(struct platform_device *pdev)
mac = of_get_mac_address(np);
if (mac)
- memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ ether_addr_copy(bp->dev->dev_addr, mac);
else
macb_get_hwaddr(bp);
@@ -2997,6 +2982,7 @@ static int macb_probe(struct platform_device *pdev)
phy_node = of_get_next_available_child(np, NULL);
if (phy_node) {
int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
+
if (gpio_is_valid(gpio)) {
bp->reset_gpio = gpio_to_desc(gpio);
gpiod_direction_output(bp->reset_gpio, 1);
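
Note on the multicast-hash comments reflowed above: the hash index is 6 bits
wide, and bit j of the index is the XOR of address bits j, j+6, ..., j+42. A
minimal stand-alone sketch of that computation (user-space C; the helper names
mirror the driver's, but this program is only illustrative):

#include <stdio.h>

/* Return bit 'bitnr' (0..47) of a 6-byte Ethernet address. */
static int hash_bit_value(int bitnr, const unsigned char *addr)
{
        return (addr[bitnr / 8] >> (bitnr % 8)) & 1;
}

/* Fold the 48-bit address into the 6-bit hash-register index. */
static int hash_get_index(const unsigned char *addr)
{
        int i, j, bitval, hash_index = 0;

        for (j = 0; j < 6; j++) {
                for (i = 0, bitval = 0; i < 8; i++)
                        bitval ^= hash_bit_value(i * 6 + j, addr);
                hash_index |= (bitval << j);
        }
        return hash_index;      /* 0-31 selects a bit of HRB, 32-63 of HRT */
}

int main(void)
{
        const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

        printf("hash index: %d\n", hash_get_index(mac));
        return 0;
}

An index below 32 sets a bit in mc_filter[0] (written to HRB), 32 and above in
mc_filter[1] (written to HRT), matching the two register writes in
macb_sethashtable() above.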
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index e4153c9f4..7708d6b14 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2819,7 +2819,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (!g) {
netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n");
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
}
cmdsetup.s.gather = 1;
@@ -2890,26 +2890,27 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
if (status == IQ_SEND_STOP)
stop_q(lio->netdev, q_idx);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
stats->tx_done++;
stats->tx_tot_bytes += skb->len;
return NETDEV_TX_OK;
+lio_xmit_dma_failed:
+ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+ ndata.datasize, DMA_TO_DEVICE);
lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
- ndata.datasize, DMA_TO_DEVICE);
recv_buffer_free(skb);
return NETDEV_TX_OK;
}
@@ -2926,7 +2927,7 @@ static void liquidio_tx_timeout(struct net_device *netdev)
netif_info(lio, tx_err, lio->netdev,
"Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
netdev->stats.tx_dropped);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
txqs_wake(netdev);
}
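
The relabelled error path in liquidio_xmit() above exists so the DMA unmap
only runs on failures that occur once a mapping is live; earlier failures
enter below the unmap. A compilable toy of the two-label pattern (all names
here are hypothetical stand-ins, not driver API):

#include <stdio.h>

static int fail_at;     /* which step fails, for demonstration */

static int dma_map(void)  { return fail_at == 1 ? -1 : 0; }
static int hw_send(void)  { return fail_at == 2 ? -1 : 0; }
static void dma_unmap(void) { puts("  unmap"); }

static int xmit(void)
{
        if (dma_map())
                goto xmit_failed;       /* nothing mapped yet */
        if (hw_send())
                goto xmit_dma_failed;   /* mapping live: undo it first */
        puts("  sent");
        return 0;

xmit_dma_failed:
        dma_unmap();
xmit_failed:
        puts("  dropped");
        return 0;                       /* NETDEV_TX_OK: skb consumed either way */
}

int main(void)
{
        for (fail_at = 0; fail_at <= 2; fail_at++) {
                printf("fail_at=%d\n", fail_at);
                xmit();
        }
        return 0;
}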
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index f67641a2f..8e23e3fad 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -602,12 +602,10 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
h->version);
- buffer = kmalloc(size, GFP_KERNEL);
+ buffer = kmemdup(data, size, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- memcpy(buffer, data, size);
-
p = buffer + sizeof(struct octeon_firmware_file_header);
/* load all images */
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index c177c7cec..388cd799d 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1320,7 +1320,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Ring the bell. */
cvmx_write_csr(p->mix + MIX_ORING2, 1);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
rv = NETDEV_TX_OK;
out:
octeon_mgmt_update_tx_stats(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 95f17f8ca..16ed20357 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -499,6 +499,7 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
u32 rr_quantum;
u8 sq_idx = sq->sq_num;
u8 pqs_vnic;
+ int svf;
if (sq->sqs_mode)
pqs_vnic = nic->pqs_vf[vnic];
@@ -511,10 +512,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
- tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ if (!sq->sqs_mode) {
+ tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
+ tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
+ tl4 += (svf * NIC_TL4_PER_LMAC);
+ tl4 += (bgx * NIC_TL4_PER_BGX);
+ }
tl4 += sq_idx;
- if (sq->sqs_mode)
- tl4 += vnic * 8;
tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
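
The reworked calculation above gives secondary Qsets (sqs_mode) their own TL4
region above all primary per-LMAC blocks, replacing the old flat "vnic * 8"
offset. With purely illustrative constants (the real NIC_TL4_PER_LMAC and
friends live in the ThunderX headers and are not reproduced here), the
indexing looks like:

#include <stdio.h>

/* Illustrative values only, NOT the silicon's actual parameters. */
#define TL4_PER_LMAC    16
#define TL4_PER_BGX     256
#define LMAC_PER_BGX    4
#define SQS_PER_VF      3

static int tl4_index(int bgx, int lmac, int svf, int sq_idx, int sqs_mode)
{
        int tl4;

        if (!sqs_mode) {
                /* primary Qset: dense per-LMAC blocks */
                tl4 = lmac * TL4_PER_LMAC + bgx * TL4_PER_BGX;
        } else {
                /* secondary Qset: region above all primary blocks */
                tl4  = LMAC_PER_BGX * TL4_PER_LMAC;
                tl4 += lmac * TL4_PER_LMAC * SQS_PER_VF;
                tl4 += svf * TL4_PER_LMAC;
                tl4 += bgx * TL4_PER_BGX;
        }
        return tl4 + sq_idx;
}

int main(void)
{
        printf("primary:   %d\n", tl4_index(0, 1, 0, 2, 0));
        printf("secondary: %d\n", tl4_index(0, 1, 1, 2, 1));
        return 0;
}

The svf loop in the patch finds which secondary-VF slot the vnic occupies;
the sketch simply takes it as a parameter.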
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index bfee298fc..a19e73f11 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1442,7 +1442,7 @@ static void nicvf_reset_task(struct work_struct *work)
nicvf_stop(nic->netdev);
nicvf_open(nic->netdev);
- nic->netdev->trans_start = jiffies;
+ netif_trans_update(nic->netdev);
}
static int nicvf_config_loopback(struct nicvf *nic,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 06b819db5..0ff8e60de 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -23,7 +23,7 @@ static void nicvf_get_page(struct nicvf *nic)
if (!nic->rb_pageref || !nic->rb_page)
return;
- atomic_add(nic->rb_pageref, &nic->rb_page->_count);
+ page_ref_add(nic->rb_page, nic->rb_pageref);
nic->rb_pageref = 0;
}
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index d20539a6d..63a39ac97 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -274,12 +274,14 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
static void bgx_lmac_handler(struct net_device *netdev)
{
struct lmac *lmac = container_of(netdev, struct lmac, netdev);
- struct phy_device *phydev = lmac->phydev;
+ struct phy_device *phydev;
int link_changed = 0;
if (!lmac)
return;
+ phydev = lmac->phydev;
+
if (!phydev->link && lmac->last_link)
link_changed = -1;
@@ -549,7 +551,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
}
/* Clear rcvflt bit (latching high) and read it back */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
if (bgx->use_training) {
@@ -568,13 +572,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- /* Wait for MAC RX to be ready */
- if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
- SMU_RX_CTL_STATUS, true)) {
- dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
- return -1;
- }
-
/* Wait for BGX RX to be idle */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -587,29 +584,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
- dev_err(&bgx->pdev->dev, "Receive fault\n");
- return -1;
- }
-
- /* Receive link is latching low. Force it high and verify it */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
- if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
- SPU_STATUS1_RCV_LNK, false)) {
- dev_err(&bgx->pdev->dev, "SPU receive link down\n");
- return -1;
- }
-
+ /* Clear receive packet disable */
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
cfg &= ~SPU_MISC_CTL_RX_DIS;
bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
- return 0;
+
+ /* Check for MAC RX faults */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+ cfg &= SMU_RX_CTL_STATUS;
+ if (!cfg)
+ return 0;
+
+ /* Rx local/remote fault seen.
+ * Do lmac reinit to see if condition recovers
+ */
+ bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+ return -1;
}
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
- u64 link;
+ u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
@@ -619,8 +617,11 @@ static void bgx_poll_for_link(struct work_struct *work)
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);
- link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
- if (link & SPU_STATUS1_RCV_LNK) {
+ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+ !(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
@@ -634,9 +635,15 @@ static void bgx_poll_for_link(struct work_struct *work)
}
if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up) {
+ if (bgx_xaui_check_link(lmac)) {
+ /* Errors, clear link_up state */
+ lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+ }
lmac->last_link = lmac->link_up;
- if (lmac->link_up)
- bgx_xaui_check_link(lmac);
}
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -708,7 +715,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
- u64 cmrx_cfg;
+ u64 cfg;
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
@@ -717,9 +724,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
destroy_workqueue(lmac->check_link);
}
- cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
- cmrx_cfg &= ~(1 << 15);
- bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ /* Disable packet reception */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_RX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Give chance for Rx/Tx FIFO to get drained */
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+ /* Disable packet transmission */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_TX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable serdes lanes */
+ if (!lmac->is_sgmii)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ else
+ bgx_reg_modify(bgx, lmacid,
+ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
bgx_flush_dmac_addrs(bgx, lmacid);
if ((bgx->lmac_type != BGX_MODE_XFI) &&
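
The polling change above stops trusting the SPU receive-link bit alone: the
link is reported up only when the SMU RX control register also shows no
local/remote fault, and an up transition must additionally pass
bgx_xaui_check_link() or be reverted. A small sketch of the combined
predicate (bit positions are illustrative, not the hardware's):

#include <stdio.h>

#define SPU_RCV_LNK     (1u << 12)      /* illustrative bit position */
#define SMU_RX_STATUS   0x3u            /* 0 ok, 1 local fault, 2 remote fault */

/* Link counts as up only if the SPU sees a receive link AND the SMU
 * reports no local/remote fault -- the condition the patch adds.
 */
static int link_up(unsigned int spu_status1, unsigned int smu_rx_ctl)
{
        return (spu_status1 & SPU_RCV_LNK) && !(smu_rx_ctl & SMU_RX_STATUS);
}

int main(void)
{
        printf("%d\n", link_up(SPU_RCV_LNK, 0));        /* 1: healthy link */
        printf("%d\n", link_up(SPU_RCV_LNK, 2));        /* 0: remote fault */
        return 0;
}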
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 149e17936..42010d2e5 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -41,6 +41,7 @@
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMRX_RX_FIFO_LEN 0x108
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
@@ -50,6 +51,7 @@
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_FIFO_LEN 0x518
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 526ea74e8..86f467a2c 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
- if (!spin_trylock(&q->lock))
- return NETDEV_TX_LOCKED;
+ spin_lock(&q->lock);
reclaim_completed_tx(sge, q);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 326d40095..b4fceb924 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -324,7 +324,9 @@ struct adapter_params {
unsigned int sf_fw_start; /* start of FW image in flash */
unsigned int fw_vers;
+ unsigned int bs_vers; /* bootstrap version */
unsigned int tp_vers;
+ unsigned int er_vers; /* expansion ROM version */
u8 api_vers[7];
unsigned short mtus[NMTUS];
@@ -357,6 +359,34 @@ struct sge_idma_monitor_state {
unsigned int idma_warn[2]; /* time to warning in HZ */
};
+/* Firmware Mailbox Command/Reply log. All values are in Host-Endian format.
+ * The access and execute times are signed in order to accommodate negative
+ * error returns.
+ */
+struct mbox_cmd {
+ u64 cmd[MBOX_LEN / 8]; /* a Firmware Mailbox Command/Reply */
+ u64 timestamp; /* OS-dependent timestamp */
+ u32 seqno; /* sequence number */
+ s16 access; /* time (ms) to access mailbox */
+ s16 execute; /* time (ms) to execute */
+};
+
+struct mbox_cmd_log {
+ unsigned int size; /* number of entries in the log */
+ unsigned int cursor; /* next position in the log to write */
+ u32 seqno; /* next sequence number */
+ /* variable length mailbox command log starts here */
+};
+
+/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
+ * return a pointer to the specified entry.
+ */
+static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
+ unsigned int entry_idx)
+{
+ return &((struct mbox_cmd *)&(log)[1])[entry_idx];
+}
+
#include "t4fw_api.h"
#define FW_VERSION(chip) ( \
@@ -394,6 +424,7 @@ struct link_config {
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
unsigned char link_ok; /* link up? */
+ unsigned char link_down_rc; /* link down reason */
};
#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
@@ -731,6 +762,7 @@ struct adapter {
u32 t4_bar0;
struct pci_dev *pdev;
struct device *pdev_dev;
+ const char *name;
unsigned int mbox;
unsigned int pf;
unsigned int flags;
@@ -776,6 +808,10 @@ struct adapter {
struct work_struct db_drop_task;
bool tid_release_task_busy;
+ /* support for mailbox command/reply logging */
+#define T4_OS_LOG_MBOX_CMDS 256
+ struct mbox_cmd_log *mbox_log;
+
struct dentry *debugfs_root;
bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
bool trace_rss; /* 1 implies that different RSS flit per filter is
@@ -1306,6 +1342,7 @@ int t4_fl_pkt_align(struct adapter *adap);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_check_fw_version(struct adapter *adap);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_bs_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
@@ -1329,6 +1366,8 @@ int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_init_rss_mode(struct adapter *adap, int mbox);
+int t4_init_portinfo(struct port_info *pi, int mbox,
+ int port, int pf, int vf, u8 mac[]);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
@@ -1464,6 +1503,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
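
The mailbox-log structures added above use a flexible layout: the fixed
header is immediately followed by 'size' mbox_cmd entries, which is why
mbox_cmd_log_entry() indexes past &log[1]. A user-space sketch of the same
layout and cursor wrap (calloc standing in for kzalloc):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define MBOX_LEN 64

struct mbox_cmd {
        uint64_t cmd[MBOX_LEN / 8];
        uint64_t timestamp;
        uint32_t seqno;
        int16_t  access;
        int16_t  execute;
};

struct mbox_cmd_log {
        unsigned int size;
        unsigned int cursor;
        uint32_t seqno;
        /* 'size' struct mbox_cmd entries follow immediately; note they
         * may be only 4-byte aligned here, exactly as in the driver.
         */
};

static struct mbox_cmd *log_entry(struct mbox_cmd_log *log, unsigned int idx)
{
        /* &log[1] is the first byte past the header */
        return &((struct mbox_cmd *)&log[1])[idx];
}

int main(void)
{
        unsigned int n = 256;   /* T4_OS_LOG_MBOX_CMDS */
        struct mbox_cmd_log *log;
        struct mbox_cmd *e;

        log = calloc(1, sizeof(*log) + n * sizeof(struct mbox_cmd));
        if (!log)
                return 1;
        log->size = n;

        /* record one entry, wrapping the cursor like t4_record_mbox() */
        e = log_entry(log, log->cursor++);
        if (log->cursor == log->size)
                log->cursor = 0;
        e->seqno = log->seqno++;
        e->timestamp = 12345;

        printf("entry 0 seqno=%u\n", log_entry(log, 0)->seqno);
        free(log);
        return 0;
}

init_one() below allocates exactly this shape with T4_OS_LOG_MBOX_CMDS (256)
entries.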
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 052c660ac..6ee2ed306 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -253,7 +253,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
{
const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
- struct net_device *dev = adap->port[port];
+ struct net_device *dev = adap->port[adap->chan_map[port]];
struct port_info *pi = netdev_priv(dev);
struct port_dcb_info *dcb = &pi->dcb;
int dcb_type = pcmd->u.dcb.pgid.type;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0bb41e9b9..91fb50850 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -1152,6 +1152,104 @@ static const struct file_operations devlog_fops = {
.release = seq_release_private
};
+/* Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * it's possible that we can catch things during a log update and therefore
+ * see partially corrupted log entries. But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log.
+ */
+static int mboxlog_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int entry_idx, i;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq,
+ "%10s %15s %5s %5s %s\n",
+ "Seq#", "Tstamp", "Atime", "Etime",
+ "Command/Reply");
+ return 0;
+ }
+
+ entry_idx = log->cursor + ((uintptr_t)v - 2);
+ if (entry_idx >= log->size)
+ entry_idx -= log->size;
+ entry = mbox_cmd_log_entry(log, entry_idx);
+
+ /* skip over unused entries */
+ if (entry->timestamp == 0)
+ return 0;
+
+ seq_printf(seq, "%10u %15llu %5d %5d",
+ entry->seqno, entry->timestamp,
+ entry->access, entry->execute);
+ for (i = 0; i < MBOX_LEN / 8; i++) {
+ u64 flit = entry->cmd[i];
+ u32 hi = (u32)(flit >> 32);
+ u32 lo = (u32)flit;
+
+ seq_printf(seq, " %08x %08x", hi, lo);
+ }
+ seq_puts(seq, "\n");
+ return 0;
+}
+
+static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+
+ return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return mboxlog_get_idx(seq, *pos);
+}
+
+static void mboxlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mboxlog_seq_ops = {
+ .start = mboxlog_start,
+ .next = mboxlog_next,
+ .stop = mboxlog_stop,
+ .show = mboxlog_show
+};
+
+static int mboxlog_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &mboxlog_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations mboxlog_fops = {
+ .owner = THIS_MODULE,
+ .open = mboxlog_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static int mbox_show(struct seq_file *seq, void *v)
{
static const char * const owner[] = { "none", "FW", "driver",
@@ -1572,6 +1670,7 @@ static const struct file_operations flash_debugfs_fops = {
.owner = THIS_MODULE,
.open = mem_open,
.read = flash_read,
+ .llseek = default_llseek,
};
static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
@@ -3128,6 +3227,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
{ "clk", &clk_debugfs_fops, S_IRUSR, 0 },
{ "devlog", &devlog_fops, S_IRUSR, 0 },
+ { "mboxlog", &mboxlog_fops, S_IRUSR, 0 },
{ "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
{ "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
{ "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 408c546fe..6eea06a31 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -166,7 +166,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter,"
static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0644);
-MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
+ "deprecated parameter");
/*
* The driver uses the best interrupt scheme available on a platform in the
@@ -301,6 +302,22 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
}
#endif /* CONFIG_CHELSIO_T4_DCB */
+int cxgb4_dcb_enabled(const struct net_device *dev)
+{
+#ifdef CONFIG_CHELSIO_T4_DCB
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!pi->dcb.enabled)
+ return 0;
+
+ return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+ (pi->dcb.state == CXGB4_DCB_STATE_HOST));
+#else
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(cxgb4_dcb_enabled);
+
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
struct net_device *dev = adapter->port[port_id];
@@ -311,8 +328,10 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
netif_carrier_on(dev);
else {
#ifdef CONFIG_CHELSIO_T4_DCB
- cxgb4_dcb_state_init(dev);
- dcb_tx_queue_prio_enable(dev, false);
+ if (cxgb4_dcb_enabled(dev)) {
+ cxgb4_dcb_state_init(dev);
+ dcb_tx_queue_prio_enable(dev, false);
+ }
#endif /* CONFIG_CHELSIO_T4_DCB */
netif_carrier_off(dev);
}
@@ -334,6 +353,17 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
netdev_info(dev, "port module unplugged\n");
else if (pi->mod_type < ARRAY_SIZE(mod_str))
netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ netdev_info(dev, "%s: unsupported port module inserted\n",
+ dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ netdev_info(dev, "%s: unknown port module inserted\n",
+ dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ netdev_info(dev, "%s: transceiver module error\n", dev->name);
+ else
+ netdev_info(dev, "%s: unknown module type %d inserted\n",
+ dev->name, pi->mod_type);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
@@ -480,28 +510,12 @@ static int link_start(struct net_device *dev)
return ret;
}
-int cxgb4_dcb_enabled(const struct net_device *dev)
-{
-#ifdef CONFIG_CHELSIO_T4_DCB
- struct port_info *pi = netdev_priv(dev);
-
- if (!pi->dcb.enabled)
- return 0;
-
- return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
- (pi->dcb.state == CXGB4_DCB_STATE_HOST));
-#else
- return 0;
-#endif
-}
-EXPORT_SYMBOL(cxgb4_dcb_enabled);
-
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
- struct net_device *dev = adap->port[port];
+ struct net_device *dev = adap->port[adap->chan_map[port]];
int old_dcb_enabled = cxgb4_dcb_enabled(dev);
int new_dcb_enabled;
@@ -631,7 +645,8 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
action == FW_PORT_ACTION_GET_PORT_INFO) {
int port = FW_PORT_CMD_PORTID_G(
be32_to_cpu(pcmd->op_to_portid));
- struct net_device *dev = q->adap->port[port];
+ struct net_device *dev =
+ q->adap->port[q->adap->chan_map[port]];
int state_input = ((pcmd->u.info.dcbxdis_pkd &
FW_PORT_CMD_DCBXDIS_F)
? CXGB4_DCB_INPUT_FW_DISABLED
@@ -3735,7 +3750,10 @@ static int adap_init0(struct adapter *adap)
* is excessively mismatched relative to the driver.)
*/
t4_get_fw_version(adap, &adap->params.fw_vers);
+ t4_get_bs_version(adap, &adap->params.bs_vers);
t4_get_tp_version(adap, &adap->params.tp_vers);
+ t4_get_exprom_version(adap, &adap->params.er_vers);
+
ret = t4_check_fw_version(adap);
/* If firmware is too old (not supported by driver) force an update. */
if (ret)
@@ -4649,6 +4667,68 @@ static void cxgb4_check_pcie_caps(struct adapter *adap)
"suggested for optimal performance.\n");
}
+/* Dump basic information about the adapter */
+static void print_adapter_info(struct adapter *adapter)
+{
+ /* Device information */
+ dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
+ adapter->params.vpd.id,
+ CHELSIO_CHIP_RELEASE(adapter->params.chip));
+ dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
+ adapter->params.vpd.sn, adapter->params.vpd.pn);
+
+ /* Firmware Version */
+ if (!adapter->params.fw_vers)
+ dev_warn(adapter->pdev_dev, "No firmware loaded\n");
+ else
+ dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
+
+ /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
+ * Firmware, so dev_info() is more appropriate here.)
+ */
+ if (!adapter->params.bs_vers)
+ dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
+ else
+ dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
+
+ /* TP Microcode Version */
+ if (!adapter->params.tp_vers)
+ dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
+ else
+ dev_info(adapter->pdev_dev,
+ "TP Microcode version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
+
+ /* Expansion ROM version */
+ if (!adapter->params.er_vers)
+ dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
+ else
+ dev_info(adapter->pdev_dev,
+ "Expansion ROM version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
+
+ /* Software/Hardware configuration */
+ dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
+ is_offload(adapter) ? "R" : "",
+ ((adapter->flags & USING_MSIX) ? "MSI-X" :
+ (adapter->flags & USING_MSI) ? "MSI" : ""),
+ is_offload(adapter) ? "Offload" : "non-Offload");
+}
+
static void print_port_info(const struct net_device *dev)
{
char buf[80];
@@ -4676,14 +4756,8 @@ static void print_port_info(const struct net_device *dev)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
- adap->params.vpd.id,
- CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
- is_offload(adap) ? "R" : "",
- (adap->flags & USING_MSIX) ? " MSI-X" :
- (adap->flags & USING_MSI) ? " MSI" : "");
- netdev_info(dev, "S/N: %s, P/N: %s\n",
- adap->params.vpd.sn, adap->params.vpd.pn);
+ netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
+ dev->name, adap->params.vpd.id, adap->name, buf);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
@@ -4835,12 +4909,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_adapter;
}
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
+
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
adapter->regs = regs;
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
adapter->mbox = func;
adapter->pf = func;
adapter->msg_enable = dflt_msg_enable;
@@ -5071,6 +5156,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (is_offload(adapter))
attach_ulds(adapter);
+ print_adapter_info(adapter);
+
sriov:
#ifdef CONFIG_PCI_IOV
if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
@@ -5090,6 +5177,7 @@ sriov:
if (adapter->workq)
destroy_workqueue(adapter->workq);
+ kfree(adapter->mbox_log);
kfree(adapter);
out_unmap_bar0:
iounmap(regs);
@@ -5156,6 +5244,7 @@ static void remove_one(struct pci_dev *pdev)
adapter->flags &= ~DEV_ENABLED;
}
pci_release_regions(pdev);
+ kfree(adapter->mbox_log);
synchronize_rcu();
kfree(adapter);
} else
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6278e5a74..bad253beb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -3006,7 +3006,9 @@ void t4_free_sge_resources(struct adapter *adap)
if (etq->q.desc) {
t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
+ __netif_tx_lock_bh(etq->txq);
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+ __netif_tx_unlock_bh(etq->txq);
kfree(etq->q.sdesc);
free_txq(adap, &etq->q);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 71586a3e0..a63addb4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -224,18 +224,34 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr)
be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
-static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
+/**
+ * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
+ * @adapter: the adapter
+ * @cmd: the Firmware Mailbox Command or Reply
+ * @size: command length in bytes
+ * @access: the time (ms) needed to access the Firmware Mailbox
+ * @execute: the time (ms) the command spent being executed
+ */
+static void t4_record_mbox(struct adapter *adapter,
+ const __be64 *cmd, unsigned int size,
+ int access, int execute)
{
- dev_err(adap->pdev_dev,
- "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
- (unsigned long long)t4_read_reg64(adap, data_reg),
- (unsigned long long)t4_read_reg64(adap, data_reg + 8),
- (unsigned long long)t4_read_reg64(adap, data_reg + 16),
- (unsigned long long)t4_read_reg64(adap, data_reg + 24),
- (unsigned long long)t4_read_reg64(adap, data_reg + 32),
- (unsigned long long)t4_read_reg64(adap, data_reg + 40),
- (unsigned long long)t4_read_reg64(adap, data_reg + 48),
- (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int i;
+
+ entry = mbox_cmd_log_entry(log, log->cursor++);
+ if (log->cursor == log->size)
+ log->cursor = 0;
+
+ for (i = 0; i < size / 8; i++)
+ entry->cmd[i] = be64_to_cpu(cmd[i]);
+ while (i < MBOX_LEN / 8)
+ entry->cmd[i++] = 0;
+ entry->timestamp = jiffies;
+ entry->seqno = log->seqno++;
+ entry->access = access;
+ entry->execute = execute;
}
/**
@@ -268,12 +284,16 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
};
+ u16 access = 0;
+ u16 execute = 0;
u32 v;
u64 res;
- int i, ms, delay_idx;
+ int i, ms, delay_idx, ret;
const __be64 *p = cmd;
u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
+ __be64 cmd_rpl[MBOX_LEN / 8];
+ u32 pcie_fw;
if ((size & 15) || size > MBOX_LEN)
return -EINVAL;
@@ -285,13 +305,24 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
if (adap->pdev->error_state != pci_channel_io_normal)
return -EIO;
+ /* If we have a negative timeout, that implies that we can't sleep. */
+ if (timeout < 0) {
+ sleep_ok = false;
+ timeout = -timeout;
+ }
+
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
- if (v != MBOX_OWNER_DRV)
- return v ? -EBUSY : -ETIMEDOUT;
+ if (v != MBOX_OWNER_DRV) {
+ ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+ return ret;
+ }
+ /* Copy in the new mailbox command and send it on its way ... */
+ t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
@@ -301,7 +332,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
delay_idx = 0;
ms = delay[0];
- for (i = 0; i < timeout; i += ms) {
+ for (i = 0;
+ !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
+ i < timeout;
+ i += ms) {
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -317,26 +351,31 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
continue;
}
- res = t4_read_reg64(adap, data_reg);
+ get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
+ res = be64_to_cpu(cmd_rpl[0]);
+
if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
fw_asrt(adap, data_reg);
res = FW_CMD_RETVAL_V(EIO);
} else if (rpl) {
- get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ memcpy(rpl, cmd_rpl, size);
}
- if (FW_CMD_RETVAL_G((int)res))
- dump_mbox(adap, mbox, data_reg);
t4_write_reg(adap, ctl_reg, 0);
+
+ execute = i + ms;
+ t4_record_mbox(adap, cmd_rpl,
+ MBOX_LEN, access, execute);
return -FW_CMD_RETVAL_G((int)res);
}
}
- dump_mbox(adap, mbox, data_reg);
+ ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
+ t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
- return -ETIMEDOUT;
+ return ret;
}
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
@@ -2937,6 +2976,20 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers)
}
/**
+ * t4_get_bs_version - read the firmware bootstrap version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW Bootstrap version from flash.
+ */
+int t4_get_bs_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/**
* t4_get_tp_version - read the TP microcode version
* @adapter: the adapter
* @vers: where to place the version
@@ -7089,52 +7142,122 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
}
/**
- * t4_handle_fw_rpl - process a FW reply message
+ * t4_link_down_rc_str - return a string for a Link Down Reason Code
* @adap: the adapter
+ * @link_down_rc: Link Down Reason Code
+ *
+ * Returns a string representation of the Link Down Reason Code.
+ */
+static const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+ static const char * const reason[] = {
+ "Link Down",
+ "Remote Fault",
+ "Auto-negotiation Failure",
+ "Reserved",
+ "Insufficient Airflow",
+ "Unable To Determine Reason",
+ "No RX Signal Detected",
+ "Reserved",
+ };
+
+ if (link_down_rc >= ARRAY_SIZE(reason))
+ return "Bad Reason Code";
+
+ return reason[link_down_rc];
+}
+
+/**
+ * t4_handle_get_port_info - process a FW reply message
+ * @pi: the port info
* @rpl: start of the FW message
*
- * Processes a FW message, such as link state change messages.
+ * Processes a GET_PORT_INFO FW reply message.
+ */
+void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+{
+ const struct fw_port_cmd *p = (const void *)rpl;
+ struct adapter *adap = pi->adapter;
+
+ /* link/module state change message */
+ int speed = 0, fc = 0;
+ struct link_config *lc;
+ u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
+ int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
+ u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
+
+ if (stat & FW_PORT_CMD_RXPAUSE_F)
+ fc |= PAUSE_RX;
+ if (stat & FW_PORT_CMD_TXPAUSE_F)
+ fc |= PAUSE_TX;
+ if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+ speed = 100;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+ speed = 1000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+ speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+ speed = 40000;
+
+ lc = &pi->link_cfg;
+
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4_os_portmod_changed(adap, pi->port_id);
+ }
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat);
+
+ lc->link_down_rc = rc;
+ dev_warn(adap->pdev_dev,
+ "Port %d link down, reason: %s\n",
+ pi->port_id, t4_link_down_rc_str(rc));
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ lc->supported = be16_to_cpu(p->u.info.pcap);
+ t4_os_link_changed(adap, pi->port_id, link_ok);
+ }
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
*/
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
u8 opcode = *(const u8 *)rpl;
- if (opcode == FW_PORT_CMD) { /* link/module state change message */
- int speed = 0, fc = 0;
- const struct fw_port_cmd *p = (void *)rpl;
+ /* This might be a port command ... this simplifies the following
+ * conditionals ... We can get away with pre-dereferencing
+ * action_to_len16 because it's in the first 16 bytes and all messages
+ * will be at least that long.
+ */
+ const struct fw_port_cmd *p = (const void *)rpl;
+ unsigned int action =
+ FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
+
+ if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+ int i;
int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
- int port = adap->chan_map[chan];
- struct port_info *pi = adap2pinfo(adap, port);
- struct link_config *lc = &pi->link_cfg;
- u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
- int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
- u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
-
- if (stat & FW_PORT_CMD_RXPAUSE_F)
- fc |= PAUSE_RX;
- if (stat & FW_PORT_CMD_TXPAUSE_F)
- fc |= PAUSE_TX;
- if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
- speed = 100;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
- speed = 1000;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
- speed = 10000;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
- speed = 40000;
-
- if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc) { /* something changed */
- lc->link_ok = link_ok;
- lc->speed = speed;
- lc->fc = fc;
- lc->supported = be16_to_cpu(p->u.info.pcap);
- t4_os_link_changed(adap, port, link_ok);
- }
- if (mod != pi->mod_type) {
- pi->mod_type = mod;
- t4_os_portmod_changed(adap, port);
+ struct port_info *pi = NULL;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->tx_chan == chan)
+ break;
}
+
+ t4_handle_get_port_info(pi, rpl);
+ } else {
+ dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode);
+ return -EINVAL;
}
return 0;
}
@@ -7654,61 +7777,74 @@ int t4_init_rss_mode(struct adapter *adap, int mbox)
return 0;
}
-int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+/**
+ * t4_init_portinfo - allocate a virtual interface and initialize port_info
+ * @pi: the port_info
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @mac: the MAC address of the VI
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC address of the VI as assigned by FW.
+ * @mac should be large enough to hold an Ethernet address.
+ * Returns < 0 on error.
+ */
+int t4_init_portinfo(struct port_info *pi, int mbox,
+ int port, int pf, int vf, u8 mac[])
{
- u8 addr[6];
- int ret, i, j = 0;
+ int ret;
struct fw_port_cmd c;
- struct fw_rss_vi_config_cmd rvc;
+ unsigned int rss_size;
memset(&c, 0, sizeof(c));
- memset(&rvc, 0, sizeof(rvc));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 = cpu_to_be32(
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(c));
+ ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
+ if (ret < 0)
+ return ret;
+
+ pi->viid = ret;
+ pi->tx_chan = port;
+ pi->lport = port;
+ pi->rss_size = rss_size;
+
+ ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
+ pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(ret) : -1;
+ pi->port_type = FW_PORT_CMD_PTYPE_G(ret);
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap));
+ return 0;
+}
+
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+ u8 addr[6];
+ int ret, i, j = 0;
for_each_port(adap, i) {
- unsigned int rss_size;
- struct port_info *p = adap2pinfo(adap, i);
+ struct port_info *pi = adap2pinfo(adap, i);
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_PORT_CMD_PORTID_V(j));
- c.action_to_len16 = cpu_to_be32(
- FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
- FW_LEN16(c));
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
if (ret)
return ret;
- ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
- if (ret < 0)
- return ret;
-
- p->viid = ret;
- p->tx_chan = j;
- p->lport = j;
- p->rss_size = rss_size;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
adap->port[i]->dev_port = j;
-
- ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
- p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
- FW_PORT_CMD_MDIOADDR_G(ret) : -1;
- p->port_type = FW_PORT_CMD_PTYPE_G(ret);
- p->mod_type = FW_PORT_MOD_TYPE_NA;
-
- rvc.op_to_viid =
- cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
- rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
- ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
- if (ret)
- return ret;
- p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
-
- init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
j++;
}
return 0;
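
Among the t4_wr_mbox_meat_timeout() changes above is a small calling
convention: a negative timeout means the caller cannot sleep, with the
magnitude still the budget in milliseconds. A sketch of that normalization
in isolation:

#include <stdbool.h>
#include <stdio.h>

/* Negative timeout == "poll, don't sleep"; magnitude is the budget in ms. */
static void normalize_timeout(int *timeout, bool *sleep_ok)
{
        if (*timeout < 0) {
                *sleep_ok = false;
                *timeout = -*timeout;
        }
}

int main(void)
{
        int timeout = -1000;            /* caller in atomic context */
        bool sleep_ok = true;

        normalize_timeout(&timeout, &sleep_ok);
        printf("timeout=%dms sleep_ok=%d\n", timeout, sleep_ok);
        return 0;
}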
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 2fc60e83a..7f59ca458 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -220,6 +220,13 @@ enum {
FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+ /* Location of bootstrap firmware image in FLASH.
+ */
+ FLASH_FWBOOTSTRAP_START_SEC = 27,
+ FLASH_FWBOOTSTRAP_NSECS = 1,
+ FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
+ FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+
/*
* iSCSI persistent/crash information.
*/
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 80417fc56..4705e2dea 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -1392,6 +1392,10 @@ struct ulp_mem_io {
#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
#define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U)
+#define T5_ULP_MEMIO_FID_S 4
+#define T5_ULP_MEMIO_FID_M 0x7ff
+#define T5_ULP_MEMIO_FID_V(x) ((x) << T5_ULP_MEMIO_FID_S)
+
/* ulp_mem_io.lock_addr fields */
#define ULP_MEMIO_ADDR_S 0
#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a2cdfc126..50812a1d6 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 7ad6d4e75..392d6644f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2510,6 +2510,11 @@ struct fw_port_cmd {
#define FW_PORT_CMD_PTYPE_G(x) \
(((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M)
+#define FW_PORT_CMD_LINKDNRC_S 5
+#define FW_PORT_CMD_LINKDNRC_M 0x7
+#define FW_PORT_CMD_LINKDNRC_G(x) \
+ (((x) >> FW_PORT_CMD_LINKDNRC_S) & FW_PORT_CMD_LINKDNRC_M)
+
#define FW_PORT_CMD_MODTYPE_S 0
#define FW_PORT_CMD_MODTYPE_M 0x1f
#define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index c4b262ca7..2accab386 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 4a707c32d..734dd776c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -387,6 +387,10 @@ struct adapter {
/* various locks */
spinlock_t stats_lock;
+ /* support for mailbox command/reply logging */
+#define T4VF_OS_LOG_MBOX_CMDS 256
+ struct mbox_cmd_log *mbox_log;
+
/* list of MAC addresses in MPS Hash */
struct list_head mac_hlist;
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1cc8a7a69..04fc6f6d1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -74,7 +74,8 @@ static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable,
- "default adapter ethtool message level bitmap");
+ "default adapter ethtool message level bitmap, "
+ "deprecated parameter");
/*
* The driver uses the best interrupt scheme available on a platform in the
@@ -1703,6 +1704,105 @@ static const struct ethtool_ops cxgb4vf_ethtool_ops = {
*/
/*
+ * Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * it's possible that we can catch things during a log update and therefore
+ * see partially corrupted log entries. But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log. But as stated above, meh ...
+ */
+static int mboxlog_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int entry_idx, i;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq,
+ "%10s %15s %5s %5s %s\n",
+ "Seq#", "Tstamp", "Atime", "Etime",
+ "Command/Reply");
+ return 0;
+ }
+
+ entry_idx = log->cursor + ((uintptr_t)v - 2);
+ if (entry_idx >= log->size)
+ entry_idx -= log->size;
+ entry = mbox_cmd_log_entry(log, entry_idx);
+
+ /* skip over unused entries */
+ if (entry->timestamp == 0)
+ return 0;
+
+ seq_printf(seq, "%10u %15llu %5d %5d",
+ entry->seqno, entry->timestamp,
+ entry->access, entry->execute);
+ for (i = 0; i < MBOX_LEN / 8; i++) {
+ u64 flit = entry->cmd[i];
+ u32 hi = (u32)(flit >> 32);
+ u32 lo = (u32)flit;
+
+ seq_printf(seq, " %08x %08x", hi, lo);
+ }
+ seq_puts(seq, "\n");
+ return 0;
+}
+
+static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+
+ return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return mboxlog_get_idx(seq, *pos);
+}
+
+static void mboxlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mboxlog_seq_ops = {
+ .start = mboxlog_start,
+ .next = mboxlog_next,
+ .stop = mboxlog_stop,
+ .show = mboxlog_show
+};
+
+static int mboxlog_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &mboxlog_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations mboxlog_fops = {
+ .owner = THIS_MODULE,
+ .open = mboxlog_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
 * Show SGE Queue Set information. We display QPL Queue Sets per line.
*/
#define QPL 4
@@ -2121,6 +2221,7 @@ struct cxgb4vf_debugfs_entry {
};
static struct cxgb4vf_debugfs_entry debugfs_files[] = {
+ { "mboxlog", S_IRUGO, &mboxlog_fops },
{ "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
{ "resources", S_IRUGO, &resources_proc_fops },
@@ -2663,6 +2764,16 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4VF_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto err_free_adapter;
+ }
+ adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
+
/*
* Initialize SMP data synchronization resources.
*/
@@ -2912,6 +3023,7 @@ err_unmap_bar0:
iounmap(adapter->regs);
err_free_adapter:
+ kfree(adapter->mbox_log);
kfree(adapter);
err_release_regions:
@@ -2981,6 +3093,7 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
iounmap(adapter->regs);
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
+ kfree(adapter->mbox_log);
kfree(adapter);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 1ccd28294..1bb57d3fb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1448,7 +1448,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* the new TX descriptors and return success.
*/
txq_advance(&txq->q, ndesc);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ring_tx_db(adapter, &txq->q, ndesc);
return NETDEV_TX_OK;
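netif_trans_update() is the 4.7 replacement for the open-coded dev->trans_start = jiffies writes that this patch converts throughout the tree: it stamps TX queue 0 and skips the store when the value is already current. For reference, a paraphrase of the 4.7 helper in include/linux/netdevice.h (not part of this patch):

/* paraphrase of the 4.7 helper; not part of this patch */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}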
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 9b40a85cc..438374a05 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -36,6 +36,7 @@
#ifndef __T4VF_COMMON_H__
#define __T4VF_COMMON_H__
+#include "../cxgb4/t4_hw.h"
#include "../cxgb4/t4fw_api.h"
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
@@ -227,6 +228,34 @@ struct adapter_params {
u8 nports; /* # of Ethernet "ports" */
};
+/* Firmware Mailbox Command/Reply log. All values are in Host-Endian format.
+ * The access and execute times are signed in order to accommodate negative
+ * error returns.
+ */
+struct mbox_cmd {
+ u64 cmd[MBOX_LEN / 8]; /* a Firmware Mailbox Command/Reply */
+ u64 timestamp; /* OS-dependent timestamp */
+ u32 seqno; /* sequence number */
+ s16 access; /* time (ms) to access mailbox */
+ s16 execute; /* time (ms) to execute */
+};
+
+struct mbox_cmd_log {
+ unsigned int size; /* number of entries in the log */
+ unsigned int cursor; /* next position in the log to write */
+ u32 seqno; /* next sequence number */
+ /* variable length mailbox command log starts here */
+};
+
+/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
+ * return a pointer to the specified entry.
+ */
+static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
+ unsigned int entry_idx)
+{
+ return &((struct mbox_cmd *)&log[1])[entry_idx];
+}
+
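mbox_cmd_log_entry() assumes the header and its entries come from one over-sized allocation: &log[1] is the first byte past the fixed header, so the probe path must reserve sizeof(*log) plus size * sizeof(struct mbox_cmd) in a single kzalloc(), exactly as cxgb4vf_pci_probe() does above. A hedged userspace rendering of the same trailing-array pattern (calloc standing in for kzalloc, hypothetical field widths):

#include <stdlib.h>
#include <string.h>

struct hdr {				/* fixed header, like mbox_cmd_log */
	unsigned int size;		/* number of trailing entries */
	unsigned int cursor;		/* next slot to write */
};

struct entry {				/* stand-in for struct mbox_cmd */
	unsigned long long cmd[8];
};

/* entries start immediately after the header, as in mbox_cmd_log_entry() */
static struct entry *hdr_entry(struct hdr *h, unsigned int idx)
{
	return &((struct entry *)&h[1])[idx];
}

int main(void)
{
	unsigned int n = 16;
	struct hdr *h = calloc(1, sizeof(*h) + n * sizeof(struct entry));

	if (!h)
		return 1;
	h->size = n;
	memset(hdr_entry(h, h->cursor), 0xff, sizeof(struct entry));
	free(h);
	return 0;
}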
#include "adapter.h"
#ifndef PCI_VENDOR_ID_CHELSIO
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index fed83d88f..955ff7c61 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -76,21 +76,33 @@ static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
}
-/*
- * Dump contents of mailbox with a leading tag.
+/**
+ * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
+ * @adapter: the adapter
+ * @cmd: the Firmware Mailbox Command or Reply
+ * @size: command length in bytes
+ * @access: the time (ms) needed to access the Firmware Mailbox
+ * @execute: the time (ms) the command spent being executed
*/
-static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
+static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
+ int size, int access, int execute)
{
- dev_err(adapter->pdev_dev,
- "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 0),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 8),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int i;
+
+ entry = mbox_cmd_log_entry(log, log->cursor++);
+ if (log->cursor == log->size)
+ log->cursor = 0;
+
+ for (i = 0; i < size / 8; i++)
+ entry->cmd[i] = be64_to_cpu(cmd[i]);
+ while (i < MBOX_LEN / 8)
+ entry->cmd[i++] = 0;
+ entry->timestamp = jiffies;
+ entry->seqno = log->seqno++;
+ entry->access = access;
+ entry->execute = execute;
}
/**
@@ -120,10 +132,13 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
1, 1, 3, 5, 10, 10, 20, 50, 100
};
+ u16 access = 0, execute = 0;
u32 v, mbox_data;
- int i, ms, delay_idx;
+ int i, ms, delay_idx, ret;
const __be64 *p;
u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+ u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
+ __be64 cmd_rpl[MBOX_LEN / 8];
/* In T6, mailbox size is changed to 128 bytes to avoid
* invalidating the entire prefetch buffer.
@@ -148,8 +163,11 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
- if (v != MBOX_OWNER_DRV)
- return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
+ if (v != MBOX_OWNER_DRV) {
+ ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ t4vf_record_mbox(adapter, cmd, size, access, ret);
+ return ret;
+ }
/*
* Write the command array into the Mailbox Data register array and
@@ -164,6 +182,8 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* Data registers before doing the write to the VF Mailbox Control
* register.
*/
+ if (cmd_op != FW_VI_STATS_CMD)
+ t4vf_record_mbox(adapter, cmd, size, access, 0);
for (i = 0, p = cmd; i < size; i += 8)
t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
t4_read_reg(adapter, mbox_data); /* flush write */
@@ -209,31 +229,33 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* We return the (negated) firmware command return
* code (this depends on FW_SUCCESS == 0).
*/
+ get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);
/* return value in low-order little-endian word */
- v = t4_read_reg(adapter, mbox_data);
- if (FW_CMD_RETVAL_G(v))
- dump_mbox(adapter, "FW Error", mbox_data);
+ v = be64_to_cpu(cmd_rpl[0]);
if (rpl) {
/* request bit in high-order BE word */
WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
& FW_CMD_REQUEST_F) == 0);
- get_mbox_rpl(adapter, rpl, size, mbox_data);
+ memcpy(rpl, cmd_rpl, size);
WARN_ON((be32_to_cpu(*(__be32 *)rpl)
& FW_CMD_REQUEST_F) != 0);
}
t4_write_reg(adapter, mbox_ctl,
MBOWNER_V(MBOX_OWNER_NONE));
+ execute = i + ms;
+ if (cmd_op != FW_VI_STATS_CMD)
+ t4vf_record_mbox(adapter, cmd_rpl, size, access,
+ execute);
return -FW_CMD_RETVAL_G(v);
}
}
- /*
- * We timed out. Return the error ...
- */
- dump_mbox(adapter, "FW Timeout", mbox_data);
- return -ETIMEDOUT;
+ /* We timed out. Return the error ... */
+ ret = -ETIMEDOUT;
+ t4vf_record_mbox(adapter, cmd, size, access, ret);
+ return ret;
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b2182d3ba..f15560a06 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2740,6 +2740,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
+ netdev->vlan_features |= netdev->features;
#ifdef CONFIG_RFS_ACCEL
netdev->hw_features |= NETIF_F_NTUPLE;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 48d919414..1471e16ba 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -966,7 +966,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
/* Our watchdog timed out. Called by the networking layer */
@@ -985,7 +985,7 @@ static void dm9000_timeout(struct net_device *dev)
dm9000_init_dm9000(dev);
dm9000_unmask_interrupts(db);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
@@ -1432,6 +1432,7 @@ dm9000_probe(struct platform_device *pdev)
int reset_gpios;
enum of_gpio_flags flags;
struct regulator *power;
+ bool inv_mac_addr = false;
power = devm_regulator_get(dev, "vcc");
if (IS_ERR(power)) {
@@ -1686,9 +1687,7 @@ dm9000_probe(struct platform_device *pdev)
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
- dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", ndev->name);
-
+ inv_mac_addr = true;
eth_hw_addr_random(ndev);
mac_src = "random";
}
@@ -1697,11 +1696,15 @@ dm9000_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
ret = register_netdev(ndev);
- if (ret == 0)
+ if (ret == 0) {
+ if (inv_mac_addr)
+ dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
+ ndev->name);
printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
ndev->name, dm9000_type_to_char(db->type),
db->io_addr, db->io_data, ndev->irq,
ndev->dev_addr, mac_src);
+ }
return 0;
out:
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 3acde3b9b..cbe84972f 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1336,7 +1336,7 @@ de4x5_open(struct net_device *dev)
}
lp->interrupt = UNMASK_INTERRUPTS;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
START_DE4X5;
@@ -1465,7 +1465,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
if (!lp->tx_enable) /* Cannot send for now */
- return NETDEV_TX_LOCKED;
+ goto tx_err;
/*
** Clean out the TX ring asynchronously to interrupts - sometimes the
@@ -1478,7 +1478,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
/* Test if cache is already locked - requeue skb if so */
if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
- return NETDEV_TX_LOCKED;
+ goto tx_err;
/* Transmit descriptor ring full or stale skb */
if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
@@ -1519,6 +1519,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
lp->cache.lock = 0;
return NETDEV_TX_OK;
+tx_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
/*
@@ -1932,7 +1935,7 @@ set_multicast_list(struct net_device *dev)
lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
}
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index afd8e78e0..8ed0fd8b1 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -192,9 +192,6 @@
(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
(pci_dev)->revision))
-/* Sten Check */
-#define DEVICE net_device
-
/* Structure/enum declaration ------------------------------- */
struct tx_desc {
__le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -313,10 +310,10 @@ static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
/* function declaration ------------------------------------- */
-static int dmfe_open(struct DEVICE *);
-static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
-static int dmfe_stop(struct DEVICE *);
-static void dmfe_set_filter_mode(struct DEVICE *);
+static int dmfe_open(struct net_device *);
+static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
+static int dmfe_stop(struct net_device *);
+static void dmfe_set_filter_mode(struct net_device *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(void __iomem *, int);
static irqreturn_t dmfe_interrupt(int , void *);
@@ -326,8 +323,8 @@ static void poll_dmfe (struct net_device *dev);
static void dmfe_descriptor_init(struct net_device *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
-static void send_filter_frame(struct DEVICE *);
-static void dm9132_id_table(struct DEVICE *);
+static void send_filter_frame(struct net_device *);
+static void dm9132_id_table(struct net_device *);
static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
static void dmfe_phy_write_1bit(void __iomem *, u32);
@@ -336,12 +333,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
-static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
-static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
+static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
+static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
-static void dmfe_dynamic_reset(struct DEVICE *);
+static void dmfe_dynamic_reset(struct net_device *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
-static void dmfe_init_dm910x(struct DEVICE *);
+static void dmfe_init_dm910x(struct net_device *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
@@ -558,7 +555,7 @@ static void dmfe_remove_one(struct pci_dev *pdev)
* The interface is opened whenever "ifconfig" activates it.
*/
-static int dmfe_open(struct DEVICE *dev)
+static int dmfe_open(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
const int irq = db->pdev->irq;
@@ -617,7 +614,7 @@ static int dmfe_open(struct DEVICE *dev)
* Enable Tx/Rx machine
*/
-static void dmfe_init_dm910x(struct DEVICE *dev)
+static void dmfe_init_dm910x(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
@@ -684,7 +681,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
*/
static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
- struct DEVICE *dev)
+ struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
@@ -728,7 +725,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
db->tx_packet_cnt++; /* Ready to send */
dw32(DCR1, 0x1); /* Issue Tx polling */
- dev->trans_start = jiffies; /* saved time stamp */
+ netif_trans_update(dev); /* saved time stamp */
} else {
db->tx_queue_cnt++; /* queue TX packet */
dw32(DCR1, 0x1); /* Issue Tx polling */
@@ -754,7 +751,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
* The interface is stopped when it is brought down.
*/
-static int dmfe_stop(struct DEVICE *dev)
+static int dmfe_stop(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
@@ -798,7 +795,7 @@ static int dmfe_stop(struct DEVICE *dev)
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
- struct DEVICE *dev = dev_id;
+ struct net_device *dev = dev_id;
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
unsigned long flags;
@@ -879,7 +876,7 @@ static void poll_dmfe (struct net_device *dev)
* Free TX resource after TX complete
*/
-static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
+static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
{
struct tx_desc *txptr;
void __iomem *ioaddr = db->ioaddr;
@@ -934,7 +931,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
db->tx_packet_cnt++; /* Ready to send */
db->tx_queue_cnt--;
dw32(DCR1, 0x1); /* Issue Tx polling */
- dev->trans_start = jiffies; /* saved time stamp */
+ netif_trans_update(dev); /* saved time stamp */
}
/* Resource available check */
@@ -961,7 +958,7 @@ static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
* Receive the come packet and pass to upper layer
*/
-static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
+static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
{
struct rx_desc *rxptr;
struct sk_buff *skb, *newskb;
@@ -1052,7 +1049,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
* Set DM910X multicast address
*/
-static void dmfe_set_filter_mode(struct DEVICE * dev)
+static void dmfe_set_filter_mode(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
unsigned long flags;
@@ -1545,7 +1542,7 @@ static void send_filter_frame(struct net_device *dev)
update_cr6(db->cr6_data | 0x2000, ioaddr);
dw32(DCR1, 0x1); /* Issue Tx polling */
update_cr6(db->cr6_data, ioaddr);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else
db->tx_queue_cnt++; /* Put in TX queue */
}
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 5364563c4..7bcccf5ca 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -44,7 +44,7 @@ void pnic_do_nway(struct net_device *dev)
tp->csr6 = new_csr6;
/* Restart Tx */
tulip_restart_rxtx(tp);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
}
}
@@ -70,7 +70,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
iowrite32(tp->csr6, ioaddr + CSR6);
iowrite32(0x30, ioaddr + CSR12);
iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
} else if (ioread32(ioaddr + CSR5) & TPLnkPass) {
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
@@ -147,7 +147,7 @@ void pnic_timer(unsigned long data)
tp->csr6 = new_csr6;
/* Restart Tx */
tulip_restart_rxtx(tp);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (tulip_debug > 1)
dev_info(&dev->dev,
"Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 94d0eebef..bbde90bc7 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev)
out_unlock:
spin_unlock_irqrestore (&tp->lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 447d09272..e750b5ddc 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -636,7 +636,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
db->tx_packet_cnt++; /* Ready to send */
uw32(DCR1, 0x1); /* Issue Tx polling */
- dev->trans_start = jiffies; /* saved time stamp */
+ netif_trans_update(dev); /* saved time stamp */
}
/* Tx resource check */
@@ -1431,7 +1431,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
update_cr6(db->cr6_data | 0x2000, ioaddr);
uw32(DCR1, 0x1); /* Issue Tx polling */
update_cr6(db->cr6_data, ioaddr);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else
netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
}
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 3c0e4d5c5..1f62b9423 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -966,7 +966,7 @@ static void tx_timeout(struct net_device *dev)
enable_irq(irq);
netif_wake_queue(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
np->stats.tx_errors++;
}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index f92b6d948..78f144696 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -706,7 +706,7 @@ rio_tx_timeout (struct net_device *dev)
dev->name, dr32(TxStatus));
rio_free_tx(dev, 0);
dev->if_port = 0;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
static netdev_tx_t
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index a28a2e583..58c6338a8 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1011,7 +1011,7 @@ static void tx_timeout(struct net_device *dev)
dev->if_port = 0;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 536686476..ed98ef1ec 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4890,11 +4890,13 @@ static int be_resume(struct be_adapter *adapter)
if (status)
return status;
- if (netif_running(netdev)) {
+ rtnl_lock();
+ if (netif_running(netdev))
status = be_open(netdev);
- if (status)
- return status;
- }
+ rtnl_unlock();
+
+ if (status)
+ return status;
netif_device_attach(netdev);
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 41b010645..4466a1187 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -860,6 +860,11 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int entry;
void *dest;
+ if (skb_put_padto(skb, ETHOC_ZLEN)) {
+ dev->stats.tx_errors++;
+ goto out_no_free;
+ }
+
if (unlikely(skb->len > ETHOC_BUFSIZ)) {
dev->stats.tx_errors++;
goto out;
@@ -894,6 +899,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
out:
dev_kfree_skb(skb);
+out_no_free:
return NETDEV_TX_OK;
}
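The order of operations matters here: skb_put_padto() zero-pads the frame to ETHOC_ZLEN but consumes the skb itself when padding fails, so the new error path must bypass the dev_kfree_skb() at the out label; that is what the separate out_no_free target is for. A hedged sketch of the general pattern for a MAC with a minimum frame length (hypothetical xmit body):

/* sketch only: pad-on-transmit for a min-frame-size MAC */
static netdev_tx_t toy_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_put_padto(skb, ETH_ZLEN)) {
		/* skb_put_padto() already freed the skb on failure */
		dev->stats.tx_errors++;
		return NETDEV_TX_OK;
	}
	/* ... map and queue the now >= ETH_ZLEN byte frame ... */
	return NETDEV_TX_OK;
}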
@@ -1086,7 +1092,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
if (netdev->mem_end) {
@@ -1095,7 +1101,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
} else {
/* Allocate buffer memory */
@@ -1106,7 +1112,7 @@ static int ethoc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
buffer_size);
ret = -ENOMEM;
- goto error;
+ goto free;
}
netdev->mem_end = netdev->mem_start + buffer_size;
priv->dma_alloc = buffer_size;
@@ -1120,7 +1126,7 @@ static int ethoc_probe(struct platform_device *pdev)
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
if (num_bd < 4) {
ret = -ENODEV;
- goto error;
+ goto free;
}
priv->num_bd = num_bd;
/* num_tx must be a power of two */
@@ -1133,7 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
- goto error;
+ goto free;
}
/* Allow the platform setup code to pass in a MAC address. */
@@ -1195,7 +1201,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
ret = -ENOMEM;
- goto free;
+ goto free2;
}
priv->mdio->name = "ethoc-mdio";
@@ -1208,7 +1214,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free;
+ goto free2;
}
ret = ethoc_mdio_probe(netdev);
@@ -1241,9 +1247,10 @@ error2:
error:
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
-free:
+free2:
if (priv->clk)
clk_disable_unprepare(priv->clk);
+free:
free_netdev(netdev);
out:
return ret;
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 06f031715..9b7a3f5a2 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -285,6 +285,7 @@ static void nps_enet_hw_reset(struct net_device *ndev)
ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
usleep_range(10, 20);
+ ge_rst_value = 0;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
/* Tx fifo reset sequence */
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 84384e158..e7cf313e3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -71,7 +71,6 @@ struct ftgmac100 {
struct napi_struct napi;
struct mii_bus *mii_bus;
- struct phy_device *phydev;
int old_speed;
};
@@ -807,7 +806,7 @@ err:
static void ftgmac100_adjust_link(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = netdev->phydev;
int ier;
if (phydev->speed == priv->old_speed)
@@ -850,7 +849,6 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv)
return PTR_ERR(phydev);
}
- priv->phydev = phydev;
return 0;
}
@@ -939,27 +937,11 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
-static int ftgmac100_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int ftgmac100_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
static const struct ethtool_ops ftgmac100_ethtool_ops = {
- .set_settings = ftgmac100_set_settings,
- .get_settings = ftgmac100_get_settings,
.get_drvinfo = ftgmac100_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
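The same conversion repeats for fec, fec_mpc52xx, fs_enet, gianfar and ucc_geth below: the driver-private phydev pointer is dropped and ethtool goes through the generic phy helpers, which operate on netdev->phydev directly. For orientation, the generic getter in drivers/net/phy/phy.c is essentially (hedged paraphrase of the 4.7 code):

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(phydev, cmd);
}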
/******************************************************************************
@@ -1085,7 +1067,7 @@ static int ftgmac100_open(struct net_device *netdev)
ftgmac100_init_hw(priv);
ftgmac100_start_hw(priv, 10);
- phy_start(priv->phydev);
+ phy_start(netdev->phydev);
napi_enable(&priv->napi);
netif_start_queue(netdev);
@@ -1111,7 +1093,7 @@ static int ftgmac100_stop(struct net_device *netdev)
netif_stop_queue(netdev);
napi_disable(&priv->napi);
- phy_stop(priv->phydev);
+ phy_stop(netdev->phydev);
ftgmac100_stop_hw(priv);
free_irq(priv->irq, netdev);
@@ -1152,9 +1134,7 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
/* optional */
static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
- struct ftgmac100 *priv = netdev_priv(netdev);
-
- return phy_mii_ioctl(priv->phydev, ifr, cmd);
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
static const struct net_device_ops ftgmac100_netdev_ops = {
@@ -1275,7 +1255,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
return 0;
err_register_netdev:
- phy_disconnect(priv->phydev);
+ phy_disconnect(netdev->phydev);
err_mii_probe:
mdiobus_unregister(priv->mii_bus);
err_register_mdiobus:
@@ -1301,7 +1281,7 @@ static int __exit ftgmac100_remove(struct platform_device *pdev)
unregister_netdev(netdev);
- phy_disconnect(priv->phydev);
+ phy_disconnect(netdev->phydev);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b1b9ebafb..c08bd7631 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1227,7 +1227,7 @@ static void fealnx_tx_timeout(struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev); /* or .._start_.. ?? */
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 195122e11..f58f9ea51 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -517,7 +517,6 @@ struct fec_enet_private {
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
int mii_timeout;
uint phy_speed;
phy_interface_t phy_interface;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2a03857cc..fea0f330d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -967,10 +967,10 @@ fec_restart(struct net_device *ndev)
rcntl &= ~(1 << 8);
/* 1G, 100M or 10M */
- if (fep->phy_dev) {
- if (fep->phy_dev->speed == SPEED_1000)
+ if (ndev->phydev) {
+ if (ndev->phydev->speed == SPEED_1000)
ecntl |= (1 << 5);
- else if (fep->phy_dev->speed == SPEED_100)
+ else if (ndev->phydev->speed == SPEED_100)
rcntl &= ~(1 << 9);
else
rcntl |= (1 << 9);
@@ -991,7 +991,7 @@ fec_restart(struct net_device *ndev)
*/
cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
- if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+ if (ndev->phydev && ndev->phydev->speed == SPEED_10)
cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
@@ -1005,7 +1005,7 @@ fec_restart(struct net_device *ndev)
/* enable pause frame*/
if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
- fep->phy_dev && fep->phy_dev->pause)) {
+ ndev->phydev && ndev->phydev->pause)) {
rcntl |= FEC_ENET_FCE;
/* set FIFO threshold parameter to reduce overrun */
@@ -1197,10 +1197,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (!skb) {
- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
- continue;
- }
+ if (!skb)
+ goto skb_done;
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1239,7 +1237,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
-
+skb_done:
/* Make sure the update to bdp and tx_skbuff are performed
* before dirty_tx
*/
@@ -1685,7 +1683,7 @@ static void fec_get_mac(struct net_device *ndev)
static void fec_enet_adjust_link(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phy_dev = fep->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
int status_change = 0;
/* Prevent a state halted on mii error */
@@ -1885,8 +1883,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
int phy_id;
int dev_id = fep->dev_id;
- fep->phy_dev = NULL;
-
if (fep->phy_node) {
phy_dev = of_phy_connect(ndev, fep->phy_node,
&fec_enet_adjust_link, 0,
@@ -1934,7 +1930,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_dev->advertising = phy_dev->supported;
- fep->phy_dev = phy_dev;
fep->link = 0;
fep->full_duplex = 0;
@@ -2064,30 +2059,6 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
}
}
-static int fec_enet_get_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = fep->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int fec_enet_set_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = fep->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static void fec_enet_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -2220,7 +2191,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- if (!fep->phy_dev)
+ if (!ndev->phydev)
return -ENODEV;
if (pause->tx_pause != pause->rx_pause) {
@@ -2236,17 +2207,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
if (pause->rx_pause || pause->autoneg) {
- fep->phy_dev->supported |= ADVERTISED_Pause;
- fep->phy_dev->advertising |= ADVERTISED_Pause;
+ ndev->phydev->supported |= ADVERTISED_Pause;
+ ndev->phydev->advertising |= ADVERTISED_Pause;
} else {
- fep->phy_dev->supported &= ~ADVERTISED_Pause;
- fep->phy_dev->advertising &= ~ADVERTISED_Pause;
+ ndev->phydev->supported &= ~ADVERTISED_Pause;
+ ndev->phydev->advertising &= ~ADVERTISED_Pause;
}
if (pause->autoneg) {
if (netif_running(ndev))
fec_stop(ndev);
- phy_start_aneg(fep->phy_dev);
+ phy_start_aneg(ndev->phydev);
}
if (netif_running(ndev)) {
napi_disable(&fep->napi);
@@ -2362,8 +2333,7 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
static int fec_enet_nway_reset(struct net_device *dev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev = fep->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!phydev)
return -ENODEV;
@@ -2446,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
- pr_err("Rx coalesced frames exceed hardware limiation");
+ pr_err("Rx coalesced frames exceed hardware limitation\n");
return -EINVAL;
}
if (ec->tx_max_coalesced_frames > 255) {
- pr_err("Tx coalesced frame exceed hardware limiation");
+ pr_err("Tx coalesced frame exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
@@ -2568,8 +2538,6 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops fec_enet_ethtool_ops = {
- .get_settings = fec_enet_get_settings,
- .set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
.get_regs_len = fec_enet_get_regs_len,
.get_regs = fec_enet_get_regs,
@@ -2589,12 +2557,14 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.set_tunable = fec_enet_set_tunable,
.get_wol = fec_enet_get_wol,
.set_wol = fec_enet_set_wol,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = fep->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -2849,7 +2819,7 @@ fec_enet_open(struct net_device *ndev)
goto err_enet_mii_probe;
napi_enable(&fep->napi);
- phy_start(fep->phy_dev);
+ phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
@@ -2873,7 +2843,7 @@ fec_enet_close(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- phy_stop(fep->phy_dev);
+ phy_stop(ndev->phydev);
if (netif_device_present(ndev)) {
napi_disable(&fep->napi);
@@ -2881,8 +2851,7 @@ fec_enet_close(struct net_device *ndev)
fec_stop(ndev);
}
- phy_disconnect(fep->phy_dev);
- fep->phy_dev = NULL;
+ phy_disconnect(ndev->phydev);
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
@@ -3510,7 +3479,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
if (netif_running(ndev)) {
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
- phy_stop(fep->phy_dev);
+ phy_stop(ndev->phydev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
netif_device_detach(ndev);
@@ -3570,7 +3539,7 @@ static int __maybe_unused fec_resume(struct device *dev)
netif_device_attach(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
- phy_start(fep->phy_dev);
+ phy_start(ndev->phydev);
}
rtnl_unlock();
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 25553ee85..446ae9d60 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -66,7 +66,6 @@ struct mpc52xx_fec_priv {
/* MDIO link details */
unsigned int mdio_speed;
struct device_node *phy_node;
- struct phy_device *phydev;
enum phy_state link;
int seven_wire_mode;
};
@@ -165,7 +164,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
static void mpc52xx_fec_adjust_link(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
int new_state = 0;
if (phydev->link != PHY_DOWN) {
@@ -215,16 +214,17 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
static int mpc52xx_fec_open(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
int err = -EBUSY;
if (priv->phy_node) {
- priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
- mpc52xx_fec_adjust_link, 0, 0);
- if (!priv->phydev) {
+ phydev = of_phy_connect(priv->ndev, priv->phy_node,
+ mpc52xx_fec_adjust_link, 0, 0);
+ if (!phydev) {
dev_err(&dev->dev, "of_phy_connect failed\n");
return -ENODEV;
}
- phy_start(priv->phydev);
+ phy_start(phydev);
}
if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
@@ -268,10 +268,9 @@ static int mpc52xx_fec_open(struct net_device *dev)
free_ctrl_irq:
free_irq(dev->irq, dev);
free_phy:
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (phydev) {
+ phy_stop(phydev);
+ phy_disconnect(phydev);
}
return err;
@@ -280,6 +279,7 @@ static int mpc52xx_fec_open(struct net_device *dev)
static int mpc52xx_fec_close(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
netif_stop_queue(dev);
@@ -291,11 +291,10 @@ static int mpc52xx_fec_close(struct net_device *dev)
free_irq(priv->r_irq, dev);
free_irq(priv->t_irq, dev);
- if (priv->phydev) {
+ if (phydev) {
/* power down phy */
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ phy_stop(phydev);
+ phy_disconnect(phydev);
}
return 0;
@@ -763,26 +762,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
/* ethtool interface */
-static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
@@ -796,23 +775,23 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
}
static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
- .get_settings = mpc52xx_fec_get_settings,
- .set_settings = mpc52xx_fec_set_settings,
.get_link = ethtool_op_get_link,
.get_msglevel = mpc52xx_fec_get_msglevel,
.set_msglevel = mpc52xx_fec_set_msglevel,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
- if (!priv->phydev)
+ if (!phydev)
return -ENOTSUPP;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(phydev, rq, cmd);
}
static const struct net_device_ops mpc52xx_fec_netdev_ops = {
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ea83712a6..1de2e1e51 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -615,7 +615,7 @@ struct fman {
struct fman_cfg *cfg;
struct muram_info *muram;
/* cam section in muram */
- int cam_offset;
+ unsigned long cam_offset;
size_t cam_size;
/* Fifo in MURAM */
int fifo_offset;
@@ -2772,7 +2772,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the FM address */
res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan memory resouce\n",
+ dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
__func__);
goto fman_node_put;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 4eb0e9ac7..47394c45b 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -129,7 +129,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
*
* Return: address of the allocated memory; NULL otherwise.
*/
-int fman_muram_alloc(struct muram_info *muram, size_t size)
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
{
unsigned long vaddr;
@@ -150,7 +150,7 @@ int fman_muram_alloc(struct muram_info *muram, size_t size)
*
* Free an allocated memory from FM-MURAM partition.
*/
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size)
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
{
unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index dbf0af9e5..889649ad8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -44,8 +44,8 @@ struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
unsigned long offset);
-int fman_muram_alloc(struct muram_info *muram, size_t size);
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
#endif /* __FM_MURAM_EXT */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 48a9c176e..61fd486c5 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -652,13 +652,13 @@ static void fs_timeout(struct net_device *dev)
spin_lock_irqsave(&fep->lock, flags);
if (dev->flags & IFF_UP) {
- phy_stop(fep->phydev);
+ phy_stop(dev->phydev);
(*fep->ops->stop)(dev);
(*fep->ops->restart)(dev);
- phy_start(fep->phydev);
+ phy_start(dev->phydev);
}
- phy_start(fep->phydev);
+ phy_start(dev->phydev);
wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
@@ -672,7 +672,7 @@ static void fs_timeout(struct net_device *dev)
static void generic_adjust_link(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev = fep->phydev;
+ struct phy_device *phydev = dev->phydev;
int new_state = 0;
if (phydev->link) {
@@ -741,8 +741,6 @@ static int fs_init_phy(struct net_device *dev)
return -ENODEV;
}
- fep->phydev = phydev;
-
return 0;
}
@@ -776,7 +774,7 @@ static int fs_enet_open(struct net_device *dev)
napi_disable(&fep->napi_tx);
return err;
}
- phy_start(fep->phydev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
@@ -792,7 +790,7 @@ static int fs_enet_close(struct net_device *dev)
netif_carrier_off(dev);
napi_disable(&fep->napi);
napi_disable(&fep->napi_tx);
- phy_stop(fep->phydev);
+ phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags);
spin_lock(&fep->tx_lock);
@@ -801,8 +799,7 @@ static int fs_enet_close(struct net_device *dev)
spin_unlock_irqrestore(&fep->lock, flags);
/* release any irqs */
- phy_disconnect(fep->phydev);
- fep->phydev = NULL;
+ phy_disconnect(dev->phydev);
free_irq(fep->interrupt, dev);
return 0;
@@ -847,26 +844,6 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 0;
}
-static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (!fep->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(fep->phydev, cmd);
-}
-
-static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (!fep->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(fep->phydev, cmd);
-}
-
static int fs_nway_reset(struct net_device *dev)
{
return 0;
@@ -887,24 +864,22 @@ static void fs_set_msglevel(struct net_device *dev, u32 value)
static const struct ethtool_ops fs_ethtool_ops = {
.get_drvinfo = fs_get_drvinfo,
.get_regs_len = fs_get_regs_len,
- .get_settings = fs_get_settings,
- .set_settings = fs_set_settings,
.nway_reset = fs_nway_reset,
.get_link = ethtool_op_get_link,
.get_msglevel = fs_get_msglevel,
.set_msglevel = fs_set_msglevel,
.get_regs = fs_get_regs,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct fs_enet_private *fep = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- return phy_mii_ioctl(fep->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
extern int fs_mii_connect(struct net_device *dev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index f184d8f95..e29f54a35 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -149,7 +149,6 @@ struct fs_enet_private {
unsigned int last_mii_status;
int interrupt;
- struct phy_device *phydev;
int oldduplex, oldspeed, oldlink; /* current settings */
/* event masks */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 1ba359f17..d71761a34 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -370,7 +370,7 @@ static void restart(struct net_device *dev)
/* adjust to speed (for RMII mode) */
if (fpi->use_rmii) {
- if (fep->phydev->speed == 100)
+ if (dev->phydev->speed == 100)
C8(fcccp, fcc_gfemr, 0x20);
else
S8(fcccp, fcc_gfemr, 0x20);
@@ -396,7 +396,7 @@ static void restart(struct net_device *dev)
S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
/* adjust to duplex mode */
- if (fep->phydev->duplex)
+ if (dev->phydev->duplex)
S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index bade2f8f9..35a318ed3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -254,7 +254,7 @@ static void restart(struct net_device *dev)
int r;
u32 addrhi, addrlo;
- struct mii_bus *mii = fep->phydev->mdio.bus;
+ struct mii_bus *mii = dev->phydev->mdio.bus;
struct fec_info* fec_inf = mii->priv;
r = whack_reset(fep->fec.fecp);
@@ -333,7 +333,7 @@ static void restart(struct net_device *dev)
/*
* adjust to duplex mode
*/
- if (fep->phydev->duplex) {
+ if (dev->phydev->duplex) {
FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
} else {
@@ -363,7 +363,7 @@ static void stop(struct net_device *dev)
const struct fs_platform_info *fpi = fep->fpi;
struct fec __iomem *fecp = fep->fec.fecp;
- struct fec_info *feci = fep->phydev->mdio.bus->priv;
+ struct fec_info *feci = dev->phydev->mdio.bus->priv;
int i;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 7a184e881..e8b9c33d3 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -352,7 +352,7 @@ static void restart(struct net_device *dev)
W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
/* Set full duplex mode if needed */
- if (fep->phydev->duplex)
+ if (dev->phydev->duplex)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
/* Restore multicast and promiscuous settings */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d2f917af5..2e6785b6e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -999,7 +999,7 @@ static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -1009,10 +1009,10 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (cmd == SIOCGHWTSTAMP)
return gfar_hwtstamp_get(dev, rq);
- if (!priv->phydev)
+ if (!phydev)
return -ENODEV;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(phydev, rq, cmd);
}
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
@@ -1635,7 +1635,7 @@ static int gfar_suspend(struct device *dev)
gfar_start_wol_filer(priv);
} else {
- phy_stop(priv->phydev);
+ phy_stop(ndev->phydev);
}
return 0;
@@ -1664,7 +1664,7 @@ static int gfar_resume(struct device *dev)
gfar_filer_restore_table(priv);
} else {
- phy_start(priv->phydev);
+ phy_start(ndev->phydev);
}
gfar_start(priv);
@@ -1698,8 +1698,8 @@ static int gfar_restore(struct device *dev)
priv->oldspeed = 0;
priv->oldduplex = -1;
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
netif_device_attach(ndev);
enable_napi(priv);
@@ -1778,6 +1778,7 @@ static int init_phy(struct net_device *dev)
priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
+ struct phy_device *phydev;
priv->oldlink = 0;
priv->oldspeed = 0;
@@ -1785,9 +1786,9 @@ static int init_phy(struct net_device *dev)
interface = gfar_get_interface(dev);
- priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
- interface);
- if (!priv->phydev) {
+ phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+ interface);
+ if (!phydev) {
dev_err(&dev->dev, "could not attach to PHY\n");
return -ENODEV;
}
@@ -1796,11 +1797,11 @@ static int init_phy(struct net_device *dev)
gfar_configure_serdes(dev);
/* Remove any features not supported by the controller */
- priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
- priv->phydev->advertising = priv->phydev->supported;
+ phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
+ phydev->advertising = phydev->supported;
/* Add support for flow control, but don't advertise it by default */
- priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
return 0;
}
@@ -1944,7 +1945,7 @@ void stop_gfar(struct net_device *dev)
/* disable ints and gracefully shut down Rx/Tx DMA */
gfar_halt(priv);
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
free_skb_resources(priv);
}
@@ -2076,7 +2077,7 @@ void gfar_start(struct gfar_private *priv)
gfar_ints_enable(priv);
- priv->ndev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(priv->ndev); /* prevent tx timeout */
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
@@ -2204,7 +2205,7 @@ int startup_gfar(struct net_device *ndev)
priv->oldspeed = 0;
priv->oldduplex = -1;
- phy_start(priv->phydev);
+ phy_start(ndev->phydev);
enable_napi(priv);
@@ -2439,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_ring_size);
if (likely(!nr_frags)) {
- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (likely(!do_tstamp))
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
u32 lstatus_start = lstatus;
@@ -2572,8 +2574,7 @@ static int gfar_close(struct net_device *dev)
stop_gfar(dev);
/* Disconnect from the PHY */
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ phy_disconnect(dev->phydev);
gfar_free_irq(priv);
@@ -3379,7 +3380,7 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
if (unlikely(phydev->link != priv->oldlink ||
(phydev->link && (phydev->duplex != priv->oldduplex ||
@@ -3620,7 +3621,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
- struct phy_device *phydev = priv->phydev;
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
u32 val = 0;
if (!phydev->duplex)
@@ -3660,7 +3662,8 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
static noinline void gfar_update_link_state(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- struct phy_device *phydev = priv->phydev;
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
struct gfar_priv_rx_q *rx_queue = NULL;
int i;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index cb7766797..373fd094f 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1153,7 +1153,6 @@ struct gfar_private {
phy_interface_t interface;
struct device_node *phy_node;
struct device_node *tbi_node;
- struct phy_device *phydev;
struct mii_bus *mii_bus;
int oldspeed;
int oldduplex;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 4b0ee855e..56588f2e1 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -184,40 +184,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
}
-
-static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
-
- if (NULL == phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
-
-/* Return the current settings in the ethtool_cmd structure */
-static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
- struct gfar_priv_rx_q *rx_queue = NULL;
- struct gfar_priv_tx_q *tx_queue = NULL;
-
- if (NULL == phydev)
- return -ENODEV;
- tx_queue = priv->tx_queue[0];
- rx_queue = priv->rx_queue[0];
-
- /* etsec-1.7 and older versions have only one txic
- * and rxic regs although they support multiple queues */
- cmd->maxtxpkt = get_icft_value(tx_queue->txic);
- cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
@@ -242,10 +208,12 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
unsigned int usecs)
{
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
unsigned int count;
/* The timer is different, depending on the interface speed */
- switch (priv->phydev->speed) {
+ switch (phydev->speed) {
case SPEED_1000:
count = GFAR_GBIT_TIME;
break;
@@ -267,10 +235,12 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
unsigned int ticks)
{
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
unsigned int count;
/* The timer is different, depending on the interface speed */
- switch (priv->phydev->speed) {
+ switch (phydev->speed) {
case SPEED_1000:
count = GFAR_GBIT_TIME;
break;
@@ -304,7 +274,7 @@ static int gfar_gcoalesce(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
- if (NULL == priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
rx_queue = priv->rx_queue[0];
@@ -365,7 +335,7 @@ static int gfar_scoalesce(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
- if (NULL == priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
/* Check the bounds of the values */
@@ -529,7 +499,7 @@ static int gfar_spauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 oldadv, newadv;
@@ -1565,8 +1535,6 @@ static int gfar_get_ts_info(struct net_device *dev,
}
const struct ethtool_ops gfar_ethtool_ops = {
- .get_settings = gfar_gsettings,
- .set_settings = gfar_ssettings,
.get_drvinfo = gfar_gdrvinfo,
.get_regs_len = gfar_reglen,
.get_regs = gfar_get_regs,
@@ -1589,4 +1557,6 @@ const struct ethtool_ops gfar_ethtool_ops = {
.set_rxnfc = gfar_set_nfc,
.get_rxnfc = gfar_get_nfc,
.get_ts_info = gfar_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
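The two callbacks added above are the generic phylib helpers: once gianfar keeps its PHY in dev->phydev, no driver-specific glue remains. As a minimal sketch (driver name illustrative, assuming a phylib-attached PHY), the whole legacy get/set pair reduces to:

#include <linux/ethtool.h>
#include <linux/phy.h>

/* Sketch: phy_ethtool_{get,set}_link_ksettings() operate on dev->phydev
 * directly and return -ENODEV when no PHY is attached, so the ops table
 * needs no wrapper functions at all.
 */
static const struct ethtool_ops example_ethtool_ops = {
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
};

One casualty of the conversion, visible in the removed gfar_gsettings(), is the maxtxpkt/maxrxpkt reporting; the interrupt-coalescing values stay reachable through the driver's .get_coalesce callback.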
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 89714f5e0..812a968a7 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -105,23 +105,20 @@ static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
static int
-uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct phy_device *phydev = ugeth->phydev;
- struct ucc_geth_info *ug_info = ugeth->ug_info;
if (!phydev)
return -ENODEV;
- ecmd->maxtxpkt = 1;
- ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
-
- return phy_ethtool_gset(phydev, ecmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
}
static int
-uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+uec_set_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct phy_device *phydev = ugeth->phydev;
@@ -129,7 +126,7 @@ uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, ecmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
static void
@@ -392,8 +389,6 @@ static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
#endif /* CONFIG_PM */
static const struct ethtool_ops uec_ethtool_ops = {
- .get_settings = uec_get_settings,
- .set_settings = uec_set_settings,
.get_drvinfo = uec_get_drvinfo,
.get_regs_len = uec_get_regs_len,
.get_regs = uec_get_regs,
@@ -411,6 +406,8 @@ static const struct ethtool_ops uec_ethtool_ops = {
.get_wol = uec_get_wol,
.set_wol = uec_set_wol,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = uec_get_ksettings,
+ .set_link_ksettings = uec_set_ksettings,
};
void uec_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 678f5018d..399cfd217 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -746,7 +746,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
lp->sent = lp->tx_queue ;
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else {
lp->tx_started = 0;
}
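This hunk, like the hix5hd2 one below, swaps the direct `dev->trans_start = jiffies` store for netif_trans_update(). For orientation, the helper is roughly the following (a sketch of its definition around this kernel version, from linux/netdevice.h):

/* Sketch: the watchdog timestamp now lives per TX queue; the helper
 * stamps queue 0 and skips the store when the value is unchanged,
 * avoiding a needless dirty cacheline.
 */
static inline void netif_trans_update(struct net_device *dev)
{
        struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

        if (txq->trans_start != jiffies)
                txq->trans_start = jiffies;
}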
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index e51892d51..b9f2ea593 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -636,7 +636,7 @@ static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
pos = dma_ring_incr(pos, TX_DESC_NUM);
writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
netdev_sent_queue(dev, skb->len);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index a1cb461ac..7a757e88c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -29,25 +29,6 @@ static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
return vf_cb->mac_cb;
}
-/**
- * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id
- * @port_id: enet port id
- *: debug port 0-1, service port 2 -7 (dsaf mode only 2)
- * return: dsaf port id
- *: service ports 0 - 5, debug port 6-7
- **/
-static int hns_ae_map_eport_to_dport(u32 port_id)
-{
- int port_index;
-
- if (port_id < DSAF_DEBUG_NW_NUM)
- port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF;
- else
- port_index = port_id - DSAF_DEBUG_NW_NUM;
-
- return port_index;
-}
-
static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
{
return container_of(dev, struct dsaf_device, ae_dev);
@@ -56,50 +37,35 @@ static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
{
int ppe_index;
- int ppe_common_index;
struct ppe_common_cb *ppe_comm;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
- if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
- ppe_index = vf_cb->port_index;
- ppe_common_index = 0;
- } else {
- ppe_index = 0;
- ppe_common_index =
- vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
- }
- ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
+ ppe_comm = vf_cb->dsaf_dev->ppe_common[0];
+ ppe_index = vf_cb->port_index;
+
return &ppe_comm->ppe_cb[ppe_index];
}
static int hns_ae_get_q_num_per_vf(
struct dsaf_device *dsaf_dev, int port)
{
- int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-
- return dsaf_dev->rcb_common[common_idx]->max_q_per_vf;
+ return dsaf_dev->rcb_common[0]->max_q_per_vf;
}
static int hns_ae_get_vf_num_per_port(
struct dsaf_device *dsaf_dev, int port)
{
- int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-
- return dsaf_dev->rcb_common[common_idx]->max_vfn;
+ return dsaf_dev->rcb_common[0]->max_vfn;
}
static struct ring_pair_cb *hns_ae_get_base_ring_pair(
struct dsaf_device *dsaf_dev, int port)
{
- int common_idx = hns_dsaf_get_comm_idx_by_port(port);
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[0];
int q_num = rcb_comm->max_q_per_vf;
int vf_num = rcb_comm->max_vfn;
- if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
- return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
- else
- return &rcb_comm->ring_pair_cb[0];
+ return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
}
static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
@@ -110,7 +76,6 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
u32 port_id)
{
- int port_idx;
int vfnum_per_port;
int qnum_per_vf;
int i;
@@ -120,11 +85,10 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
struct hnae_vf_cb *vf_cb;
dsaf_dev = hns_ae_get_dsaf_dev(dev);
- port_idx = hns_ae_map_eport_to_dport(port_id);
- ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
- vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
- qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);
+ ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id);
+ vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
+ qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
vf_cb = kzalloc(sizeof(*vf_cb) +
qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
@@ -163,14 +127,14 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
}
vf_cb->dsaf_dev = dsaf_dev;
- vf_cb->port_index = port_idx;
- vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];
+ vf_cb->port_index = port_id;
+ vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];
ae_handle->phy_if = vf_cb->mac_cb->phy_if;
ae_handle->phy_node = vf_cb->mac_cb->phy_node;
ae_handle->if_support = vf_cb->mac_cb->if_support;
ae_handle->port_type = vf_cb->mac_cb->mac_type;
- ae_handle->dport_id = port_idx;
+ ae_handle->dport_id = port_id;
return ae_handle;
vf_id_err:
@@ -320,11 +284,8 @@ static void hns_ae_reset(struct hnae_handle *handle)
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
- u8 ppe_common_index =
- vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
-
hns_mac_reset(vf_cb->mac_cb);
- hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
+ hns_ppe_reset_common(vf_cb->dsaf_dev, 0);
}
}
@@ -399,11 +360,16 @@ static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
static void hns_ae_get_pauseparam(struct hnae_handle *handle,
u32 *auto_neg, u32 *rx_en, u32 *tx_en)
{
- assert(handle);
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
- hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg);
+ hns_mac_get_autoneg(mac_cb, auto_neg);
- hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en);
+ hns_mac_get_pauseparam(mac_cb, rx_en, tx_en);
+
+ /* Service port's pause feature is provided by DSAF, not mac */
+ if (handle->port_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
}
static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
@@ -436,12 +402,21 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle,
u32 autoneg, u32 rx_en, u32 tx_en)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
int ret;
ret = hns_mac_set_autoneg(mac_cb, autoneg);
if (ret)
return ret;
+ /* Service port's pause feature is provided by DSAF, not mac */
+ if (handle->port_type == HNAE_PORT_SERVICE) {
+ ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
+ mac_cb->mac_id, rx_en);
+ if (ret)
+ return ret;
+ rx_en = 0;
+ }
return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
}
@@ -689,7 +664,7 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
assert(handle);
mac_cb = hns_get_mac_cb(handle);
- if (!mac_cb->cpld_vaddr)
+ if (!mac_cb->cpld_ctrl)
return;
hns_set_led_opt(mac_cb);
}
@@ -709,7 +684,6 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
void hns_ae_get_regs(struct hnae_handle *handle, void *data)
{
u32 *p = data;
- u32 rcb_com_idx;
int i;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
@@ -717,8 +691,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data)
hns_ppe_get_regs(ppe_cb, p);
p += hns_ppe_get_regs_count();
- rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
- hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
+ hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p);
p += hns_rcb_get_common_regs_count();
for (i = 0; i < handle->q_num; i++) {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a38084a22..611581fcc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -7,18 +7,19 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/phy_fixed.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/phy_fixed.h>
+#include <linux/platform_device.h>
-#include "hns_dsaf_misc.h"
#include "hns_dsaf_main.h"
+#include "hns_dsaf_misc.h"
#include "hns_dsaf_rcb.h"
#define MAC_EN_FLAG_V 0xada0328
@@ -81,17 +82,6 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
}
}
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
-{
- if (!mac_cb->cpld_vaddr)
- return -ENODEV;
-
- *sfp_prsnt = !dsaf_read_b((u8 *)mac_cb->cpld_vaddr
- + MAC_SFP_PORT_OFFSET);
-
- return 0;
-}
-
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
{
struct mac_driver *mac_ctrl_drv;
@@ -168,10 +158,9 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
u8 vmid, u8 *port_num)
{
u8 tmp_port;
- u32 comm_idx;
if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
- if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) {
+ if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) {
dev_err(mac_cb->dev,
"input invalid,%s mac%d vmid%d !\n",
mac_cb->dsaf_dev->ae_dev.name,
@@ -179,7 +168,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
return -EINVAL;
}
} else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
- if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) {
+ if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) {
dev_err(mac_cb->dev,
"input invalid,%s mac%d vmid%d!\n",
mac_cb->dsaf_dev->ae_dev.name,
@@ -192,9 +181,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
return -EINVAL;
}
- comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id);
-
- if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) {
+ if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) {
dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
return -EINVAL;
@@ -234,7 +221,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
}
/**
- *hns_mac_get_inner_port_num - change vf mac address
+ *hns_mac_change_vf_addr - change vf mac address
*@mac_cb: mac device
*@vmid: vmid
*@addr:mac address
@@ -249,7 +236,7 @@ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
struct mac_entry_idx *old_entry;
old_entry = &mac_cb->addr_entry_idx[vmid];
- if (dsaf_dev) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = old_entry->vlan_id;
mac_entry.in_port_num = mac_cb->mac_id;
@@ -289,7 +276,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
struct dsaf_drv_mac_single_dest_entry mac_entry;
- if (dsaf_dev && addr) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev) && addr) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = 0;/*vlan_id;*/
mac_entry.in_port_num = mac_cb->mac_id;
@@ -380,7 +367,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
if (mac_cb->mac_type == HNAE_PORT_DEBUG)
return 0;
- if (dsaf_dev) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = vlan_id;
mac_entry.in_port_num = mac_cb->mac_id;
@@ -418,7 +405,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
- if (dsaf_dev) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
mac_entry.in_port_num = mac_cb->mac_id;
@@ -439,9 +426,8 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
void hns_mac_reset(struct hns_mac_cb *mac_cb)
{
- struct mac_driver *drv;
-
- drv = hns_mac_get_drv(mac_cb);
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
drv->mac_init(drv);
@@ -456,7 +442,7 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb)
if (drv->mac_pausefrm_cfg) {
if (mac_cb->mac_type == HNAE_PORT_DEBUG)
- drv->mac_pausefrm_cfg(drv, 0, 0);
+ drv->mac_pausefrm_cfg(drv, !is_ver1, !is_ver1);
else /* mac rx must disable, dsaf pfc close instead of it*/
drv->mac_pausefrm_cfg(drv, 0, 1);
}
@@ -561,14 +547,6 @@ void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
*rx_en = 0;
*tx_en = 0;
}
-
- /* Due to the chip defect, the service mac's rx pause CAN'T be enabled.
- * We set the rx pause frm always be true (1), because DSAF deals with
- * the rx pause frm instead of service mac. After all, we still support
- * rx pause frm.
- */
- if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- *rx_en = 1;
}
/**
@@ -602,20 +580,13 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
- if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
- if (!rx_en) {
- dev_err(mac_cb->dev, "disable rx_pause is not allowed!");
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ if (is_ver1 && (tx_en || rx_en)) {
+ dev_err(mac_cb->dev, "macv1 can't enable tx/rx_pause!");
return -EINVAL;
}
- } else if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
- if (tx_en || rx_en) {
- dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!");
- return -EINVAL;
- }
- } else {
- dev_err(mac_cb->dev, "Unsupport this operation!");
- return -EINVAL;
}
if (mac_ctrl_drv->mac_pausefrm_cfg)
@@ -667,14 +638,18 @@ free_mac_drv:
}
/**
- *mac_free_dev - get mac information from device node
+ *hns_mac_get_info - get mac information from device node
*@mac_cb: mac device
*@np:device node
- *@mac_mode_idx:mac mode index
+ * return: 0 - success, negative - fail
*/
-static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
- struct device_node *np, u32 mac_mode_idx)
+static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
{
+ struct device_node *np = mac_cb->dev->of_node;
+ struct regmap *syscon;
+ struct of_phandle_args cpld_args;
+ u32 ret;
+
mac_cb->link = false;
mac_cb->half_duplex = false;
mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
@@ -690,12 +665,73 @@ static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
mac_cb->max_frm = MAC_DEFAULT_MTU;
mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
+ mac_cb->port_rst_off = mac_cb->mac_id;
+ mac_cb->port_mode_off = 0;
- /* Get the rest of the PHY information */
- mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id);
+ /* if the dsaf node doesn't contain a port subnode, get phy-handle
+ * from dsaf node
+ */
+ if (!mac_cb->fw_port) {
+ mac_cb->phy_node = of_parse_phandle(np, "phy-handle",
+ mac_cb->mac_id);
+ if (mac_cb->phy_node)
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+ mac_cb->mac_id, mac_cb->phy_node->name);
+ return 0;
+ }
+ if (!is_of_node(mac_cb->fw_port))
+ return -EINVAL;
+ /* parse property from port subnode in dsaf */
+ mac_cb->phy_node = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "phy-handle", 0);
if (mac_cb->phy_node)
dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
mac_cb->mac_id, mac_cb->phy_node->name);
+ syscon = syscon_node_to_regmap(
+ of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "serdes-syscon", 0));
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
+ return -EINVAL;
+ }
+ mac_cb->serdes_ctrl = syscon;
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-rst-offset",
+ &mac_cb->port_rst_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-rst-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-mode-offset",
+ &mac_cb->port_mode_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-mode-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = of_parse_phandle_with_fixed_args(to_of_node(mac_cb->fw_port),
+ "cpld-syscon", 1, 0, &cpld_args);
+ if (ret) {
+ dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
+ mac_cb->mac_id);
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ syscon = syscon_node_to_regmap(cpld_args.np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ mac_cb->cpld_ctrl = syscon;
+ mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+ }
+ }
+
+ return 0;
}
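hns_mac_get_info() above treats "port-rst-offset" and "port-mode-offset" as optional: a failed read only logs at debug level, and the defaults assigned earlier stay in effect because fwnode_property_read_u32() leaves the output untouched on failure. The same optional-property idiom in isolation (helper name illustrative):

#include <linux/property.h>

/* Sketch: read an optional u32 firmware property with a fallback.
 * Pre-loading the default is sufficient since a failed read does not
 * modify *val.
 */
static u32 example_read_u32_default(struct fwnode_handle *fwnode,
                                    const char *prop, u32 def)
{
        u32 val = def;

        if (fwnode_property_read_u32(fwnode, prop, &val))
                pr_debug("%s not found, using default %u\n", prop, def);

        return val;
}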
/**
@@ -725,40 +761,31 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
return base + 0x40000 + mac_id * 0x4000 -
mac_mode_idx * 0x20000;
else
- return mac_cb->serdes_vaddr + 0x1000
- + (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000;
+ return dsaf_dev->ppe_base + 0x1000;
}
/**
* hns_mac_get_cfg - get mac cfg from dtb or acpi table
* @dsaf_dev: dsa fabric device struct pointer
- * @mac_idx: mac index
- * retuen 0 - success , negative --fail
+ * @mac_cb: mac control block
+ * return 0 - success, negative - fail
*/
-int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
+int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
{
int ret;
u32 mac_mode_idx;
- struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx];
mac_cb->dsaf_dev = dsaf_dev;
mac_cb->dev = dsaf_dev->dev;
- mac_cb->mac_id = mac_idx;
mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base;
mac_cb->serdes_vaddr = dsaf_dev->sds_base;
- if (dsaf_dev->cpld_base &&
- mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
- mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
- mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
- cpld_led_reset(mac_cb);
- }
mac_cb->sfp_prsnt = 0;
mac_cb->txpkt_for_led = 0;
mac_cb->rxpkt_for_led = 0;
- if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
mac_cb->mac_type = HNAE_PORT_SERVICE;
else
mac_cb->mac_type = HNAE_PORT_DEBUG;
@@ -774,53 +801,100 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
}
mac_mode_idx = (u32)ret;
- hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx);
+ ret = hns_mac_get_info(mac_cb);
+ if (ret)
+ return ret;
+ cpld_led_reset(mac_cb);
mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
return 0;
}
+static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
+{
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 1;
+ else
+ return DSAF_MAX_PORT_NUM;
+}
+
/**
* hns_mac_init - init mac
* @dsaf_dev: dsa fabric device struct pointer
- * retuen 0 - success , negative --fail
+ * return 0 - success, negative - fail
*/
int hns_mac_init(struct dsaf_device *dsaf_dev)
{
- int i;
+ bool found = false;
int ret;
- size_t size;
+ u32 port_id;
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
struct hns_mac_cb *mac_cb;
+ struct fwnode_handle *child;
- size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP;
- dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL);
- if (!dsaf_dev->mac_cb)
- return -ENOMEM;
+ device_for_each_child_node(dsaf_dev->dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &port_id);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "get reg fail, ret=%d!\n", ret);
+ return ret;
+ }
+ if (port_id >= max_port_num) {
+ dev_err(dsaf_dev->dev,
+ "reg(%u) out of range!\n", port_id);
+ return -EINVAL;
+ }
+ mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+ GFP_KERNEL);
+ if (!mac_cb)
+ return -ENOMEM;
+ mac_cb->fw_port = child;
+ mac_cb->mac_id = (u8)port_id;
+ dsaf_dev->mac_cb[port_id] = mac_cb;
+ found = true;
+ }
- for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) {
- ret = hns_mac_get_cfg(dsaf_dev, i);
- if (ret)
- goto free_mac_cb;
+ /* if don't get any port subnode from dsaf node
+ * will init all port then, this is compatible with the old dts
+ */
+ if (!found) {
+ for (port_id = 0; port_id < max_port_num; port_id++) {
+ mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+ GFP_KERNEL);
+ if (!mac_cb)
+ return -ENOMEM;
+
+ mac_cb->mac_id = port_id;
+ dsaf_dev->mac_cb[port_id] = mac_cb;
+ }
+ }
+ /* init mac_cb for all port */
+ for (port_id = 0; port_id < max_port_num; port_id++) {
+ mac_cb = dsaf_dev->mac_cb[port_id];
+ if (!mac_cb)
+ continue;
- mac_cb = &dsaf_dev->mac_cb[i];
+ ret = hns_mac_get_cfg(dsaf_dev, mac_cb);
+ if (ret)
+ return ret;
ret = hns_mac_init_ex(mac_cb);
if (ret)
- goto free_mac_cb;
+ return ret;
}
return 0;
-
-free_mac_cb:
- dsaf_dev->mac_cb = NULL;
-
- return ret;
}
void hns_mac_uninit(struct dsaf_device *dsaf_dev)
{
- cpld_led_reset(dsaf_dev->mac_cb);
- dsaf_dev->mac_cb = NULL;
+ int i;
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
+
+ for (i = 0; i < max_port_num; i++) {
+ cpld_led_reset(dsaf_dev->mac_cb[i]);
+ dsaf_dev->mac_cb[i] = NULL;
+ }
}
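The reworked hns_mac_init() enumerates port subnodes with device_for_each_child_node() and keys each mac_cb off the subnode's "reg" property, keeping the probe-all-ports path only for older device trees. The enumeration idiom on its own looks like this sketch (function name illustrative):

#include <linux/property.h>

/* Sketch: walk child firmware nodes and read each one's "reg".
 * On an early return from inside the loop, the node reference held by
 * the iterator must be dropped with fwnode_handle_put().
 */
static int example_count_ports(struct device *dev)
{
        struct fwnode_handle *child;
        u32 reg;
        int n = 0;

        device_for_each_child_node(dev, child) {
                if (fwnode_property_read_u32(child, "reg", &reg)) {
                        fwnode_handle_put(child);
                        return -EINVAL;
                }
                n++;
        }

        return n;
}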
int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
@@ -908,7 +982,7 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
- if (!mac_cb || !mac_cb->cpld_vaddr)
+ if (!mac_cb || !mac_cb->cpld_ctrl)
return 0;
return cpld_set_led_id(mac_cb, status);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 823b6e78c..97ce9a750 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -10,9 +10,10 @@
#ifndef _HNS_DSAF_MAC_H
#define _HNS_DSAF_MAC_H
-#include <linux/phy.h>
-#include <linux/kernel.h>
#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
#include "hns_dsaf_main.h"
struct dsaf_device;
@@ -310,10 +311,15 @@ struct hns_mac_cb {
struct device *dev;
struct dsaf_device *dsaf_dev;
struct mac_priv priv;
+ struct fwnode_handle *fw_port;
u8 __iomem *vaddr;
- u8 __iomem *cpld_vaddr;
u8 __iomem *sys_ctl_vaddr;
u8 __iomem *serdes_vaddr;
+ struct regmap *serdes_ctrl;
+ struct regmap *cpld_ctrl;
+ u32 cpld_ctrl_reg;
+ u32 port_rst_off;
+ u32 port_mode_off;
struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
u8 sfp_prsnt;
u8 cpld_led_value;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 5978a5c8e..1c2ddb25e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -7,27 +7,29 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
+#include "hns_dsaf_mac.h"
#include "hns_dsaf_main.h"
-#include "hns_dsaf_rcb.h"
#include "hns_dsaf_ppe.h"
-#include "hns_dsaf_mac.h"
+#include "hns_dsaf_rcb.h"
const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
[DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
[DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+ [DSAF_MODE_DISABLE_SP] = "single-port",
};
int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
@@ -35,8 +37,13 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
int ret, i;
u32 desc_num;
u32 buf_size;
+ u32 reset_offset = 0;
+ u32 res_idx = 0;
const char *mode_str;
+ struct regmap *syscon;
+ struct resource *res;
struct device_node *np = dsaf_dev->dev->of_node;
+ struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
dsaf_dev->dsaf_ver = AE_VERSION_1;
@@ -73,42 +80,68 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
else
dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
- dsaf_dev->sc_base = of_iomap(np, 0);
- if (!dsaf_dev->sc_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
- }
+ syscon = syscon_node_to_regmap(
+ of_parse_phandle(np, "subctrl-syscon", 0));
+ if (IS_ERR_OR_NULL(syscon)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
+ return -ENOMEM;
+ }
+ dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!dsaf_dev->sc_base) {
+ dev_err(dsaf_dev->dev, "subctrl can not map!\n");
+ return -ENOMEM;
+ }
- dsaf_dev->sds_base = of_iomap(np, 1);
- if (!dsaf_dev->sds_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
+ return -ENOMEM;
+ }
+ dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!dsaf_dev->sds_base) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
+ return -ENOMEM;
+ }
+ } else {
+ dsaf_dev->sub_ctrl = syscon;
}
- dsaf_dev->ppe_base = of_iomap(np, 2);
- if (!dsaf_dev->ppe_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "ppe-base info is needed!\n");
+ return -ENOMEM;
+ }
}
-
- dsaf_dev->io_base = of_iomap(np, 3);
- if (!dsaf_dev->io_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
+ dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!dsaf_dev->ppe_base) {
+ dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n");
+ return -ENOMEM;
+ }
+ dsaf_dev->ppe_paddr = res->start;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dsaf-base");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx);
+ if (!res) {
+ dev_err(dsaf_dev->dev,
+ "dsaf-base info is needed!\n");
+ return -ENOMEM;
+ }
+ }
+ dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (!dsaf_dev->io_base) {
+ dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n");
+ return -ENOMEM;
+ }
}
- dsaf_dev->cpld_base = of_iomap(np, 4);
- if (!dsaf_dev->cpld_base)
- dev_dbg(dsaf_dev->dev, "NO CPLD ADDR");
-
ret = of_property_read_u32(np, "desc-num", &desc_num);
if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
desc_num > HNS_DSAF_MAX_DESC_CNT) {
@@ -118,6 +151,13 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
}
dsaf_dev->desc_num = desc_num;
+ ret = of_property_read_u32(np, "reset-field-offset", &reset_offset);
+ if (ret < 0) {
+ dev_dbg(dsaf_dev->dev,
+ "get reset-field-offset fail, ret=%d!\r\n", ret);
+ }
+ dsaf_dev->reset_offset = reset_offset;
+
ret = of_property_read_u32(np, "buf-size", &buf_size);
if (ret < 0) {
dev_err(dsaf_dev->dev,
@@ -149,8 +189,6 @@ unmap_base_addr:
iounmap(dsaf_dev->sds_base);
if (dsaf_dev->sc_base)
iounmap(dsaf_dev->sc_base);
- if (dsaf_dev->cpld_base)
- iounmap(dsaf_dev->cpld_base);
return ret;
}
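hns_dsaf_get_cfg() now tries a "subctrl-syscon" phandle first and falls back to a plain MEM resource, which is what later lets dsaf_read_sub()/dsaf_write_sub() dispatch between regmap and direct MMIO. Condensed, the fallback pattern is (illustrative helper, not the driver's exact flow):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Sketch: prefer a syscon regmap; otherwise ioremap the MEM resource.
 * Exactly one of *map and *base is valid on success.
 */
static int example_get_ctrl(struct platform_device *pdev, int idx,
                            struct regmap **map, void __iomem **base)
{
        struct resource *res;

        *map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                               "subctrl-syscon");
        if (!IS_ERR(*map))
                return 0;

        *map = NULL;
        res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
        if (!res)
                return -ENOMEM;

        *base = devm_ioremap_resource(&pdev->dev, res);
        return PTR_ERR_OR_ZERO(*base);
}

Note that devm_ioremap_resource() reports failure as an ERR_PTR rather than NULL, which is why the sketch checks it with PTR_ERR_OR_ZERO().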
@@ -167,9 +205,6 @@ static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
if (dsaf_dev->sc_base)
iounmap(dsaf_dev->sc_base);
-
- if (dsaf_dev->cpld_base)
- iounmap(dsaf_dev->cpld_base);
}
/**
@@ -217,9 +252,7 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
u32 q_id, q_num_per_port;
u32 i;
- hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
- HNS_DSAF_COMM_SERVICE_NW_IDX,
- &max_vfn, &max_q_per_vf);
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
q_num_per_port = max_vfn * max_q_per_vf;
for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
@@ -239,9 +272,7 @@ static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
if (AE_IS_VER1(dsaf_dev->dsaf_ver))
return;
- hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
- HNS_DSAF_COMM_SERVICE_NW_IDX,
- &max_vfn, &max_q_per_vf);
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
q_num_per_port = max_vfn * max_q_per_vf;
for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
@@ -712,13 +743,15 @@ static void hns_dsaf_tbl_tcam_data_ucast_pul(
void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
{
- dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG,
+ DSAF_CFG_MIX_MODE_S, !!en);
}
void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
{
if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
- dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG)
+ dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG)
return;
dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
@@ -1022,12 +1055,52 @@ static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
* @mac_cb: mac contrl block
*/
static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
- int mac_id, int en)
+ int mac_id, int tc_en)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, tc_en);
+}
+
+static void hns_dsaf_set_pfc_pause(struct dsaf_device *dsaf_dev,
+ int mac_id, int tx_en, int rx_en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ if (!tx_en || !rx_en)
+ dev_err(dsaf_dev->dev, "dsaf v1 can not close pfc!\n");
+
+ return;
+ }
+
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_PFC_PAUSE_RX_EN_B, !!rx_en);
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_PFC_PAUSE_TX_EN_B, !!tx_en);
+}
+
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 en)
{
- if (!en)
- dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0);
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ if (!en)
+ dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
+
+ return -EINVAL;
+ }
+
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_MAC_PAUSE_RX_EN_B, !!en);
+
+ return 0;
+}
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 *en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ *en = 1;
else
- dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff);
+ *en = dsaf_get_dev_bit(dsaf_dev,
+ DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_MAC_PAUSE_RX_EN_B);
}
/**
@@ -1039,6 +1112,7 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
{
u32 i;
u32 o_dsaf_cfg;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
@@ -1064,8 +1138,10 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
/*set dsaf pfc to 0 for parseing rx pause*/
- for (i = 0; i < DSAF_COMM_CHN; i++)
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
+ hns_dsaf_set_pfc_pause(dsaf_dev, i, is_ver1, is_ver1);
+ }
/*msk and clr exception irqs */
for (i = 0; i < DSAF_COMM_CHN; i++) {
@@ -1264,6 +1340,9 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
u32 i;
int ret;
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 0;
+
ret = hns_dsaf_init_hw(dsaf_dev);
if (ret)
return ret;
@@ -2013,6 +2092,8 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
{
struct dsaf_hw_stats *hw_stats
= &dsaf_dev->hw_stats[node_num];
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ u32 reg_tmp;
hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
@@ -2022,8 +2103,12 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
- hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev,
- DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+ reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+ DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+ hw_stats->rx_pause_frame +=
+ dsaf_read_dev(dsaf_dev, reg_tmp + 0x80 * (u64)node_num);
+
hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
@@ -2056,6 +2141,8 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
u32 i = 0;
u32 j;
u32 *p = data;
+ u32 reg_tmp;
+ bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
/* dsaf common registers */
p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
@@ -2120,8 +2207,9 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
p[190 + i] = dsaf_read_dev(ddev,
DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
- p[193 + i] = dsaf_read_dev(ddev,
- DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80);
+ reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+ DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+ p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80);
p[196 + i] = dsaf_read_dev(ddev,
DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
p[199 + i] = dsaf_read_dev(ddev,
@@ -2368,8 +2456,11 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+ if (!is_ver1)
+ p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+
/* mark end of dsaf regs */
- for (i = 498; i < 504; i++)
+ for (i = 499; i < 504; i++)
p[i] = 0xdddddddd;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 5fea226ef..f0502ba0a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -41,6 +41,7 @@ struct hns_mac_cb;
#define DSAF_STATIC_NUM 28
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
enum hal_dsaf_mode {
HRD_DSAF_NO_DSAF_MODE = 0x0,
@@ -117,6 +118,7 @@ enum dsaf_mode {
DSAF_MODE_ENABLE_32VM, /**< en DSAF-mode, support 32 VM */
DSAF_MODE_ENABLE_128VM, /**< en DSAF-mode, support 128 VM */
DSAF_MODE_ENABLE, /**< before is enable DSAF mode*/
+ DSAF_MODE_DISABLE_SP, /**< non-dsaf, single port mode */
DSAF_MODE_DISABLE_FIX, /**< non-dasf, fixed to queue*/
DSAF_MODE_DISABLE_2PORT_8VM, /**< non-dasf, 2port 8VM */
DSAF_MODE_DISABLE_2PORT_16VM, /**< non-dasf, 2port 16VM */
@@ -275,10 +277,12 @@ struct dsaf_device {
u8 __iomem *sds_base;
u8 __iomem *ppe_base;
u8 __iomem *io_base;
- u8 __iomem *cpld_base;
+ struct regmap *sub_ctrl;
+ phys_addr_t ppe_paddr;
u32 desc_num; /* desc num per queue*/
u32 buf_size; /* ring buffer size */
+ u32 reset_offset; /* reset field offset in sub sysctrl */
int buf_size_type; /* ring buffer size-type */
enum dsaf_mode dsaf_mode; /* dsaf mode */
enum hal_dsaf_mode dsaf_en;
@@ -287,7 +291,7 @@ struct dsaf_device {
struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
- struct hns_mac_cb *mac_cb;
+ struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
struct dsaf_int_stat int_stat;
@@ -359,14 +363,6 @@ static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev,
tab_line_addr);
}
-static inline int hns_dsaf_get_comm_idx_by_port(int port)
-{
- if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP))
- return 0;
- else
- return (port - DSAF_COMM_CHN + 1);
-}
-
static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
struct hnae_handle *handle)
{
@@ -417,6 +413,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port);
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
int hns_dsaf_get_regs_count(void);
void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 *en);
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 en);
void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index e69b02287..a837bb9e3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -7,10 +7,30 @@
* (at your option) any later version.
*/
-#include "hns_dsaf_misc.h"
#include "hns_dsaf_mac.h"
-#include "hns_dsaf_reg.h"
+#include "hns_dsaf_misc.h"
#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_reg.h"
+
+static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
+{
+ if (dsaf_dev->sub_ctrl)
+ dsaf_write_syscon(dsaf_dev->sub_ctrl, reg, val);
+ else
+ dsaf_write_reg(dsaf_dev->sc_base, reg, val);
+}
+
+static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
+{
+ u32 ret;
+
+ if (dsaf_dev->sub_ctrl)
+ ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg);
+ else
+ ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+
+ return ret;
+}
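These two dispatch helpers let every reset and mode access target either the new syscon regmap or the legacy ioremapped sub-controller base at run time. The dsaf_{read,write}_syscon calls they wrap are assumed to be thin regmap accessors along these lines (a sketch; the real definitions live in hns_dsaf_reg.h):

#include <linux/regmap.h>

/* Sketch of the assumed syscon accessors: direct regmap_write/regmap_read,
 * with the read dropping the error code.
 */
static inline void example_write_syscon(struct regmap *base, u32 reg, u32 val)
{
        regmap_write(base, reg, val);
}

static inline u32 example_read_syscon(struct regmap *base, u32 reg)
{
        unsigned int val;

        regmap_read(base, reg, &val);
        return val;
}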
void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
u16 speed, int data)
@@ -22,8 +42,8 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
pr_err("sfp_led_opt mac_dev is null!\n");
return;
}
- if (!mac_cb->cpld_vaddr) {
- dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null !\n",
+ if (!mac_cb->cpld_ctrl) {
+ dev_err(mac_cb->dev, "mac_id=%d, cpld syscon is null !\n",
mac_cb->mac_id);
return;
}
@@ -40,21 +60,24 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
dsaf_set_bit(value, DSAF_LED_DATA_B, data);
if (value != mac_cb->cpld_led_value) {
- dsaf_write_b(mac_cb->cpld_vaddr, value);
+ dsaf_write_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg, value);
mac_cb->cpld_led_value = value;
}
} else {
- dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ CPLD_LED_DEFAULT_VALUE);
mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
}
}
void cpld_led_reset(struct hns_mac_cb *mac_cb)
{
- if (!mac_cb || !mac_cb->cpld_vaddr)
+ if (!mac_cb || !mac_cb->cpld_ctrl)
return;
- dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ CPLD_LED_DEFAULT_VALUE);
mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
}
@@ -63,15 +86,19 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
{
switch (status) {
case HNAE_LED_ACTIVE:
- mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
+ mac_cb->cpld_led_value =
+ dsaf_read_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg);
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
CPLD_LED_ON_VALUE);
- dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ mac_cb->cpld_led_value);
return 2;
case HNAE_LED_INACTIVE:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
CPLD_LED_DEFAULT_VALUE);
- dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ mac_cb->cpld_led_value);
break;
default:
break;
@@ -95,10 +122,8 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
}
- dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr,
- RESET_REQ_OR_DREQ);
- dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr,
- RESET_REQ_OR_DREQ);
+ dsaf_write_sub(dsaf_dev, xbar_reg_addr, RESET_REQ_OR_DREQ);
+ dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ);
}
void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
@@ -110,14 +135,14 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
return;
reg_val |= RESET_REQ_OR_DREQ;
- reg_val |= 0x2082082 << port;
+ reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off;
if (val == 0)
reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
@@ -129,68 +154,63 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
if (port >= DSAF_XGE_NUM)
return;
- reg_val |= XGMAC_TRX_CORE_SRST_M << port;
+ reg_val |= XGMAC_TRX_CORE_SRST_M
+ << dsaf_dev->mac_cb[port]->port_rst_off;
if (val == 0)
reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
{
u32 reg_val_1;
u32 reg_val_2;
+ u32 port_rst_off;
if (port >= DSAF_GE_NUM)
return;
- if (port < DSAF_SERVICE_NW_NUM) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
reg_val_1 = 0x1 << port;
+ port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
/* there is difference between V1 and V2 in register.*/
if (AE_IS_VER1(dsaf_dev->dsaf_ver))
- reg_val_2 = 0x1041041 << port;
+ reg_val_2 = 0x1041041 << port_rst_off;
else
- reg_val_2 = 0x2082082 << port;
+ reg_val_2 = 0x2082082 << port_rst_off;
if (val == 0) {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
reg_val_1);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_REQ0_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ0_REG,
reg_val_2);
} else {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ0_REG,
reg_val_2);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
reg_val_1);
}
} else {
- reg_val_1 = 0x15540 << (port - 6);
- reg_val_2 = 0x100 << (port - 6);
+ reg_val_1 = 0x15540 << dsaf_dev->reset_offset;
+ reg_val_2 = 0x100 << dsaf_dev->reset_offset;
if (val == 0) {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
reg_val_1);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_PPE_RESET_REQ_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_REQ_REG,
reg_val_2);
} else {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
reg_val_1);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_DREQ_REG,
reg_val_2);
}
}
@@ -201,24 +221,23 @@ void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
u32 reg_val = 0;
u32 reg_addr;
- reg_val |= RESET_REQ_OR_DREQ << port;
+ reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off;
if (val == 0)
reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
{
- int comm_index = ppe_common->comm_index;
struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
u32 reg_val;
u32 reg_addr;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
reg_val = RESET_REQ_OR_DREQ;
if (val == 0)
reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
@@ -226,7 +245,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
} else {
- reg_val = 0x100 << (comm_index - 1);
+ reg_val = 0x100 << dsaf_dev->reset_offset;
if (val == 0)
reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
@@ -234,7 +253,7 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
}
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
/**
@@ -246,36 +265,45 @@ phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
{
u32 mode;
u32 reg;
- u32 shift;
bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
- void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
int mac_id = mac_cb->mac_id;
- phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+ phy_interface_t phy_if;
- if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) {
- phy_if = PHY_INTERFACE_MODE_SGMII;
- } else if (mac_id >= 0 && mac_id <= 3) {
- reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG;
- mode = dsaf_read_reg(sys_ctl_vaddr, reg);
- /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */
- shift = is_ver1 ? 0 : mac_id;
- if (dsaf_get_bit(mode, shift))
- phy_if = PHY_INTERFACE_MODE_XGMII;
+ if (is_ver1) {
+ if (HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev))
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (mac_id >= 0 && mac_id <= 3)
+ reg = HNS_MAC_HILINK4_REG;
else
- phy_if = PHY_INTERFACE_MODE_SGMII;
- } else if (mac_id >= 4 && mac_id <= 7) {
- reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG;
- mode = dsaf_read_reg(sys_ctl_vaddr, reg);
- /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */
- shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6;
- if (dsaf_get_bit(mode, shift))
- phy_if = PHY_INTERFACE_MODE_XGMII;
+ reg = HNS_MAC_HILINK3_REG;
+ } else {
+ if (!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev) && mac_id <= 3)
+ reg = HNS_MAC_HILINK4V2_REG;
else
- phy_if = PHY_INTERFACE_MODE_SGMII;
+ reg = HNS_MAC_HILINK3V2_REG;
}
+
+ mode = dsaf_read_sub(mac_cb->dsaf_dev, reg);
+ if (dsaf_get_bit(mode, mac_cb->port_mode_off))
+ phy_if = PHY_INTERFACE_MODE_XGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+
return phy_if;
}
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ if (!mac_cb->cpld_ctrl)
+ return -ENODEV;
+
+ *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg
+ + MAC_SFP_PORT_OFFSET);
+
+ return 0;
+}
+
/**
* hns_mac_config_sds_loopback - set loop back for serdes
* @mac_cb: mac control block
@@ -312,7 +340,14 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
pr_info("no sfp in this eth\n");
}
- dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+ if (mac_cb->serdes_ctrl) {
+ u32 origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+
+ dsaf_set_field(origin, 1ull << 10, 10, !!en);
+ dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
+ } else {
+ dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+ }
return 0;
}
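The serdes loopback hunk above open-codes a read-modify-write through dsaf_read_syscon()/dsaf_write_syscon(). With plain regmap calls, the same update collapses into one helper (a sketch; bit 10 is the loopback-enable field taken from the hunk itself):

#include <linux/bitops.h>
#include <linux/regmap.h>

/* Sketch: regmap_update_bits() performs the read-modify-write of the
 * serdes loopback bit in a single call.
 */
static int example_set_serdes_loopback(struct regmap *serdes_ctrl,
                                       u32 reg_offset, bool en)
{
        return regmap_update_bits(serdes_ctrl, reg_offset,
                                  BIT(10), en ? BIT(10) : 0);
}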
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 5b7ae5ff4..8cd151a52 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,22 +61,10 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
}
}
-static void __iomem *hns_ppe_common_get_ioaddr(
- struct ppe_common_cb *ppe_common)
+static void __iomem *
+hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
{
- void __iomem *base_addr;
-
- int idx = ppe_common->comm_index;
-
- if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
- base_addr = ppe_common->dsaf_dev->ppe_base
- + PPE_COMMON_REG_OFFSET;
- else
- base_addr = ppe_common->dsaf_dev->sds_base
- + (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
- + PPE_COMMON_REG_OFFSET;
-
- return base_addr;
+ return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
}
/**
@@ -90,7 +78,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
struct ppe_common_cb *ppe_common;
int ppe_num;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
else
ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
@@ -103,7 +91,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
ppe_common->ppe_num = ppe_num;
ppe_common->dsaf_dev = dsaf_dev;
ppe_common->comm_index = comm_index;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
else
ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
@@ -124,32 +112,8 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
int ppe_idx)
{
- void __iomem *base_addr;
- int common_idx = ppe_common->comm_index;
-
- if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
- base_addr = ppe_common->dsaf_dev->ppe_base +
- ppe_idx * PPE_REG_OFFSET;
-
- } else {
- base_addr = ppe_common->dsaf_dev->sds_base +
- (common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET;
- }
-
- return base_addr;
-}
-
-static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx)
-{
- int port;
-
- if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE)
- port = idx;
- else
- port = HNS_PPE_SERVICE_NW_ENGINE_NUM
- + ppe_common->comm_index - 1;
- return port;
+ return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
}
static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
@@ -164,7 +128,6 @@ static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
ppe_cb->next = NULL;
ppe_cb->ppe_common_cb = ppe_common;
ppe_cb->index = i;
- ppe_cb->port = hns_ppe_get_port(ppe_common, i);
ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
ppe_cb->virq = 0;
}
@@ -318,7 +281,7 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
{
struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
- u32 port = ppe_cb->port;
+ u32 port = ppe_cb->index;
struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
int i;
@@ -332,10 +295,12 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
/* clr and msk except irq*/
hns_ppe_exc_irq_en(ppe_cb, 0);
- if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG)
+ if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) {
hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
- else
+ dsaf_write_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG, 0);
+ } else {
hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
+ }
hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
hns_ppe_cnt_clr_ce(ppe_cb);
@@ -375,7 +340,8 @@ void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
u32 i;
for (i = 0; i < ppe_common->ppe_num; i++) {
- hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+ if (ppe_common->dsaf_dev->mac_cb[i])
+ hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
}
}
@@ -408,8 +374,11 @@ void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
if (ret)
return;
- for (i = 0; i < ppe_common->ppe_num; i++)
- hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+ for (i = 0; i < ppe_common->ppe_num; i++) {
+ /* We only need to initiate ppe when the port exists */
+ if (dsaf_dev->mac_cb[i])
+ hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+ }
ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index e9c0ec2fa..9d8e643e8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,6 @@ struct hns_ppe_cb {
struct hns_ppe_hw_stats hw_stats;
u8 index; /* index in a ppe common device */
- u8 port; /* port id in dsaf */
void __iomem *io_base;
int virq;
u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 28ee26e5c..4ef6d23d9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -270,7 +270,7 @@ static void hns_rcb_set_port_timeout(
static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
- if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
return HNS_RCB_SERVICE_NW_ENGINE_NUM;
else
return HNS_RCB_DEBUG_NW_ENGINE_NUM;
@@ -430,36 +430,20 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
static int hns_rcb_get_port_in_comm(
struct rcb_common_cb *rcb_common, int ring_idx)
{
- int comm_index = rcb_common->comm_index;
- int port;
- int q_num;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
- port = ring_idx / q_num;
- } else {
- port = 0; /* config debug-ports port_id_in_comm to 0*/
- }
-
- return port;
+ return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
#define SERVICE_RING_IRQ_IDX(v1) \
((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
-#define DEBUG_RING_IRQ_IDX(v1) \
- ((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX)
-#define DEBUG_RING_IRQ_OFFSET(v1) \
- ((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
- int comm_index = rcb_common->comm_index;
bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
return SERVICE_RING_IRQ_IDX(is_ver1);
else
- return DEBUG_RING_IRQ_IDX(is_ver1) +
- (comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1);
+ return HNS_DEBUG_RING_IRQ_IDX;
}
#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
@@ -549,7 +533,7 @@ int hns_rcb_set_coalesce_usecs(
return 0;
if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
- if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
dev_err(rcb_common->dsaf_dev->dev,
"error: not support coalesce_usecs setting!\n");
return -EINVAL;
@@ -601,113 +585,82 @@ int hns_rcb_set_coalesced_frames(
*@max_vfn : max vfn number
*@max_q_per_vf:max ring number per vm
*/
-void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
- u16 *max_vfn, u16 *max_q_per_vf)
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
+ u16 *max_q_per_vf)
{
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- switch (dsaf_mode) {
- case DSAF_MODE_DISABLE_6PORT_0VM:
- *max_vfn = 1;
- *max_q_per_vf = 16;
- break;
- case DSAF_MODE_DISABLE_FIX:
- *max_vfn = 1;
- *max_q_per_vf = 1;
- break;
- case DSAF_MODE_DISABLE_2PORT_64VM:
- *max_vfn = 64;
- *max_q_per_vf = 1;
- break;
- case DSAF_MODE_DISABLE_6PORT_16VM:
- *max_vfn = 16;
- *max_q_per_vf = 1;
- break;
- default:
- *max_vfn = 1;
- *max_q_per_vf = 16;
- break;
- }
- } else {
+ switch (dsaf_mode) {
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ case DSAF_MODE_DISABLE_SP:
*max_vfn = 1;
*max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ *max_vfn = 64;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ *max_vfn = 16;
+ *max_q_per_vf = 1;
+ break;
+ default:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
}
}
-int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
+int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- switch (dsaf_dev->dsaf_mode) {
- case DSAF_MODE_ENABLE_FIX:
- return 1;
-
- case DSAF_MODE_DISABLE_FIX:
- return 6;
-
- case DSAF_MODE_ENABLE_0VM:
- return 32;
-
- case DSAF_MODE_DISABLE_6PORT_0VM:
- case DSAF_MODE_ENABLE_16VM:
- case DSAF_MODE_DISABLE_6PORT_2VM:
- case DSAF_MODE_DISABLE_6PORT_16VM:
- case DSAF_MODE_DISABLE_6PORT_4VM:
- case DSAF_MODE_ENABLE_8VM:
- return 96;
-
- case DSAF_MODE_DISABLE_2PORT_16VM:
- case DSAF_MODE_DISABLE_2PORT_8VM:
- case DSAF_MODE_ENABLE_32VM:
- case DSAF_MODE_DISABLE_2PORT_64VM:
- case DSAF_MODE_ENABLE_128VM:
- return 128;
-
- default:
- dev_warn(dsaf_dev->dev,
- "get ring num fail,use default!dsaf_mode=%d\n",
- dsaf_dev->dsaf_mode);
- return 128;
- }
- } else {
+ switch (dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ case DSAF_MODE_DISABLE_SP:
return 1;
+
+ case DSAF_MODE_DISABLE_FIX:
+ return 6;
+
+ case DSAF_MODE_ENABLE_0VM:
+ return 32;
+
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_ENABLE_8VM:
+ return 96;
+
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_ENABLE_128VM:
+ return 128;
+
+ default:
+ dev_warn(dsaf_dev->dev,
+ "get ring num fail,use default!dsaf_mode=%d\n",
+ dsaf_dev->dsaf_mode);
+ return 128;
}
}
-void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
- int comm_index)
+void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
- void __iomem *base_addr;
-
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
- base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
- else
- base_addr = dsaf_dev->sds_base
- + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
- + RCB_COMMON_REG_OFFSET;
+ struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
- return base_addr;
+ return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}
-static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
- int comm_index)
+static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
- struct device_node *np = dsaf_dev->dev->of_node;
- phys_addr_t phy_addr;
- const __be32 *tmp_addr;
- u64 addr_offset = 0;
- u64 size = 0;
- int index = 0;
-
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- index = 2;
- addr_offset = RCB_COMMON_REG_OFFSET;
- } else {
- index = 1;
- addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
- RCB_COMMON_REG_OFFSET;
- }
- tmp_addr = of_get_address(np, index, &size, NULL);
- phy_addr = of_translate_address(np, tmp_addr);
- return phy_addr + addr_offset;
+ struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
+
+ return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
@@ -717,7 +670,7 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
u16 max_vfn;
u16 max_q_per_vf;
- int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);
+ int ring_num = hns_rcb_get_ring_num(dsaf_dev);
rcb_common =
devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
@@ -732,12 +685,12 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
rcb_common->desc_num = dsaf_dev->desc_num;
- hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
+ hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
rcb_common->max_vfn = max_vfn;
rcb_common->max_q_per_vf = max_q_per_vf;
- rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
- rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);
+ rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
+ rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);
dsaf_dev->rcb_common[comm_index] = rcb_common;
return 0;
@@ -932,7 +885,7 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
u32 *regs = data;
bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
- bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
+ bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
u32 reg_tmp;
u32 reg_num_tmp;
u32 i = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index eb61014ad..bd54dac82 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -111,7 +111,7 @@ void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
void hns_rcb_start(struct hnae_queue *q, u32 val);
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
-void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 7d7204f45..7c3b5103d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -10,25 +10,20 @@
#ifndef _DSAF_REG_H_
#define _DSAF_REG_H_
-#define HNS_DEBUG_RING_IRQ_IDX 55
-#define HNS_SERVICE_RING_IRQ_IDX 59
-#define HNS_DEBUG_RING_IRQ_OFFSET 2
-#define HNSV2_DEBUG_RING_IRQ_IDX 409
-#define HNSV2_SERVICE_RING_IRQ_IDX 25
-#define HNSV2_DEBUG_RING_IRQ_OFFSET 9
-
-#define DSAF_MAX_PORT_NUM_PER_CHIP 8
-#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6
-#define DSAF_MAX_VM_NUM 128
-
-#define DSAF_COMM_DEV_NUM 3
-#define DSAF_PPE_INODE_BASE 6
-#define HNS_DSAF_COMM_SERVICE_NW_IDX 0
+#include <linux/regmap.h>
+#define HNS_DEBUG_RING_IRQ_IDX 0
+#define HNS_SERVICE_RING_IRQ_IDX 59
+#define HNSV2_SERVICE_RING_IRQ_IDX 25
+
+#define DSAF_MAX_PORT_NUM 6
+#define DSAF_MAX_VM_NUM 128
+
+#define DSAF_COMM_DEV_NUM 1
+#define DSAF_PPE_INODE_BASE 6
#define DSAF_DEBUG_NW_NUM 2
#define DSAF_SERVICE_NW_NUM 6
#define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM
#define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
-#define DSAF_PORT_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
#define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM
#define DSAF_PORT_TYPE_NUM 3
#define DSAF_NODE_NUM 18
@@ -137,6 +132,7 @@
#define DSAF_PPE_INT_STS_0_REG 0x1E0
#define DSAF_ROCEE_INT_STS_0_REG 0x200
#define DSAFV2_SERDES_LBK_0_REG 0x220
+#define DSAF_PAUSE_CFG_REG 0x240
#define DSAF_PPE_QID_CFG_0_REG 0x300
#define DSAF_SW_PORT_TYPE_0_REG 0x320
#define DSAF_STP_PORT_TYPE_0_REG 0x340
@@ -155,6 +151,7 @@
#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030
#define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038
#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C
+#define DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x1024
#define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C
#define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050
#define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054
@@ -711,6 +708,10 @@
#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
#define DSAF_PFC_UNINT_CNT_S 0
+#define DSAF_MAC_PAUSE_RX_EN_B 2
+#define DSAF_PFC_PAUSE_RX_EN_B 1
+#define DSAF_PFC_PAUSE_TX_EN_B 0
+
#define DSAF_PPE_QID_CFG_M 0xFF
#define DSAF_PPE_QID_CFG_S 0
@@ -988,6 +989,19 @@ static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
return readl(reg_addr + reg);
}
+static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
+{
+ regmap_write(base, reg, value);
+}
+
+static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
+{
+ unsigned int val;
+
+ regmap_read(base, reg, &val);
+ return val;
+}
+
#define dsaf_read_dev(a, reg) \
dsaf_read_reg((a)->io_base, (reg))
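The two syscon helpers above are thin wrappers around the regmap accessors. A minimal usage sketch, assuming only <linux/regmap.h> and <linux/bitops.h>; the register offset and bit index are caller-supplied placeholders, not defines from this driver:

	/* Hypothetical example: set one bit in a syscon-backed register. */
	static void example_syscon_set_bit(struct regmap *syscon, u32 reg, u32 bit)
	{
		u32 val = dsaf_read_syscon(syscon, reg);

		dsaf_write_syscon(syscon, reg, val | BIT(bit));
	}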
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 687204b78..e621636e6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1275,7 +1275,7 @@ void hns_nic_net_reinit(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
- priv->netdev->trans_start = jiffies;
+ netif_trans_update(priv->netdev);
while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
usleep_range(1000, 2000);
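The many trans_start conversions in this patch use the helper added for multiqueue devices; roughly, netif_trans_update() does the following as of this kernel version (include/linux/netdevice.h), shown here for reference:

	static inline void netif_trans_update(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;
	}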
@@ -1376,7 +1376,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
ret = hns_nic_net_xmit_hw(ndev, skb,
&tx_ring_data(priv, skb->queue_mapping));
if (ret == NETDEV_TX_OK) {
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
ndev->stats.tx_bytes += skb->len;
ndev->stats.tx_packets++;
}
@@ -1648,7 +1648,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
rtnl_lock();
/* put off any impending NetWatchDogTimeout */
- priv->netdev->trans_start = jiffies;
+ netif_trans_update(priv->netdev);
if (type == HNAE_PORT_DEBUG) {
hns_nic_net_reinit(priv->netdev);
@@ -1873,6 +1873,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
struct net_device *ndev;
struct hns_nic_priv *priv;
struct device_node *node = dev->of_node;
+ u32 port_id;
int ret;
ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
@@ -1896,10 +1897,18 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
dev_err(dev, "not find ae-handle\n");
goto out_read_prop_fail;
}
-
- ret = of_property_read_u32(node, "port-id", &priv->port_id);
- if (ret)
- goto out_read_prop_fail;
+ /* try to find port-idx-in-ae first */
+ ret = of_property_read_u32(node, "port-idx-in-ae", &port_id);
+ if (ret) {
+ /* fall back to "port-id" for compatibility with old devicetrees */
+ ret = of_property_read_u32(node, "port-id", &port_id);
+ if (ret)
+ goto out_read_prop_fail;
+ /* for old dts, we need to calculate the port offset */
+ port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
+ : port_id - HNS_SRV_OFFSET;
+ }
+ priv->port_id = port_id;
hns_init_mac_addr(ndev);
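A short illustration of the legacy remapping above, using HNS_DEBUG_OFFSET (6) and HNS_SRV_OFFSET (2) from hns_enet.h; old devicetrees numbered the two debug ports first:

	/* Illustration only:
	 *   old port-id 0..1 (debug ports)   -> port-idx-in-ae 6..7
	 *   old port-id 2..7 (service ports) -> port-idx-in-ae 0..5
	 */
	static u32 remap_legacy_port_id(u32 port_id)
	{
		return port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
						: port_id - HNS_SRV_OFFSET;
	}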
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index c68ab3d34..337efa582 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -18,6 +18,9 @@
#include "hnae.h"
+#define HNS_DEBUG_OFFSET 6
+#define HNS_SRV_OFFSET 2
+
enum hns_nic_state {
NIC_STATE_TESTING = 0,
NIC_STATE_RESETTING,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3d746c887..67a648c7d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -46,7 +46,6 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
u32 link_stat = priv->link;
struct hnae_handle *h;
- assert(priv && priv->ae_handle);
h = priv->ae_handle;
if (priv->phy) {
@@ -646,8 +645,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
- assert(priv);
-
strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
sizeof(drvinfo->version));
drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -720,8 +717,6 @@ static int hns_set_pauseparam(struct net_device *net_dev,
struct hnae_handle *h;
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
h = priv->ae_handle;
ops = h->dev->ops;
@@ -780,8 +775,6 @@ static int hns_set_coalesce(struct net_device *net_dev,
struct hnae_ae_ops *ops;
int ret;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
@@ -1111,8 +1104,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
cmd->version = HNS_CHIP_VERSION;
@@ -1135,8 +1126,6 @@ static int hns_get_regs_len(struct net_device *net_dev)
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (!ops->get_regs_len) {
netdev_err(net_dev, "ops->get_regs_len is null!\n");
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 3daf2d4a7..631dbc7b4 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev)
return -EAGAIN;
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_start_queue(dev);
lp->lan_type = hp100_sense_lan(dev);
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 7ce6379fd..befb4ac3e 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1042,7 +1042,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index c984998b3..3dbc53c21 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -960,7 +960,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 353f57f67..21c84cc9c 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -983,7 +983,7 @@ static void sun3_82586_timeout(struct net_device *dev)
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_SCB_CMD();
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
return 0;
}
#endif
@@ -996,7 +996,7 @@ static void sun3_82586_timeout(struct net_device *dev)
sun3_82586_close(dev);
sun3_82586_open(dev);
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
/******************************************************
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 2a0dc127d..54efa9a51 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1169,16 +1169,15 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
port = ehea_get_port(adapter, portnum);
+ if (!port) {
+ netdev_err(NULL, "unknown portnum %x\n", portnum);
+ return;
+ }
dev = port->netdev;
switch (ec) {
case EHEA_EC_PORTSTATE_CHG: /* port state change */
- if (!port) {
- netdev_err(dev, "unknown portnum %x\n", portnum);
- break;
- }
-
if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
if (!netif_carrier_ok(dev)) {
ret = ehea_sense_port_attr(port);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 5d7db6c01..4c9771d57 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -301,7 +301,7 @@ static inline void emac_netif_stop(struct emac_instance *dev)
dev->no_mcast = 1;
netif_addr_unlock(dev->ndev);
netif_tx_unlock_bh(dev->ndev);
- dev->ndev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev->ndev); /* prevent tx timeout */
mal_poll_disable(dev->mal, &dev->commac);
netif_tx_disable(dev->ndev);
}
@@ -1377,7 +1377,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len)
DBG2(dev, "stopped TX queue" NL);
}
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
++dev->stats.tx_packets;
dev->stats.tx_bytes += len;
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index d3b9d1033..5b88cc690 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -470,12 +470,38 @@ static struct mii_phy_def m88e1112_phy_def = {
.ops = &m88e1112_phy_ops,
};
+static int ar8035_init(struct mii_phy *phy)
+{
+ phy_write(phy, 0x1d, 0x5); /* Address debug register 5 */
+ phy_write(phy, 0x1e, 0x2d47); /* Value copied from u-boot */
+ phy_write(phy, 0x1d, 0xb); /* Address hib ctrl */
+ phy_write(phy, 0x1e, 0xbc20); /* Value copied from u-boot */
+
+ return 0;
+}
+
+static struct mii_phy_ops ar8035_phy_ops = {
+ .init = ar8035_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link,
+};
+
+static struct mii_phy_def ar8035_phy_def = {
+ .phy_id = 0x004dd070,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Atheros 8035 Gigabit Ethernet",
+ .ops = &ar8035_phy_ops,
+};
+
static struct mii_phy_def *mii_phy_table[] = {
&et1011c_phy_def,
&cis8201_phy_def,
&bcm5248_phy_def,
&m88e1111_phy_def,
&m88e1112_phy_def,
+ &ar8035_phy_def,
&genmii_phy_def,
NULL
};
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e9e16eee..88f3c85fb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -61,6 +61,7 @@
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
@@ -74,6 +75,7 @@
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/seq_file.h>
+#include <linux/workqueue.h>
#include "ibmvnic.h"
@@ -88,12 +90,14 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
union sub_crq *sub_crq);
+static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
@@ -467,7 +471,8 @@ static int ibmvnic_open(struct net_device *netdev)
crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
ibmvnic_send_crq(adapter, &crq);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
+
return 0;
bounce_map_failed:
@@ -517,7 +522,7 @@ static int ibmvnic_close(struct net_device *netdev)
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
if (adapter->bounce_buffer) {
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -561,10 +566,141 @@ static int ibmvnic_close(struct net_device *netdev)
return 0;
}
+/**
+ * build_hdr_data - creates L2/L3/L4 header data buffer
+ * @hdr_field - bitfield determining needed headers
+ * @skb - socket buffer
+ * @hdr_len - array of header lengths
+ * @hdr_data - buffer to store the built header data
+ *
+ * Reads hdr_field to determine which headers are needed by firmware.
+ * Builds a buffer containing these headers. Saves individual header
+ * lengths and total buffer length to be used to build descriptors.
+ */
+static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
+ int *hdr_len, u8 *hdr_data)
+{
+ int len = 0;
+ u8 *hdr;
+
+ hdr_len[0] = sizeof(struct ethhdr);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ hdr_len[1] = ip_hdr(skb)->ihl * 4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ hdr_len[2] = tcp_hdrlen(skb);
+ else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ hdr_len[2] = sizeof(struct udphdr);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hdr_len[1] = sizeof(struct ipv6hdr);
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ hdr_len[2] = tcp_hdrlen(skb);
+ else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ hdr_len[2] = sizeof(struct udphdr);
+ }
+
+ memset(hdr_data, 0, 120);
+ if ((hdr_field >> 6) & 1) {
+ hdr = skb_mac_header(skb);
+ memcpy(hdr_data, hdr, hdr_len[0]);
+ len += hdr_len[0];
+ }
+
+ if ((hdr_field >> 5) & 1) {
+ hdr = skb_network_header(skb);
+ memcpy(hdr_data + len, hdr, hdr_len[1]);
+ len += hdr_len[1];
+ }
+
+ if ((hdr_field >> 4) & 1) {
+ hdr = skb_transport_header(skb);
+ memcpy(hdr_data + len, hdr, hdr_len[2]);
+ len += hdr_len[2];
+ }
+ return len;
+}
+
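Read off the shifts in build_hdr_data() above, hdr_field appears to carry one bit per layer; this layout is inferred from the code, not from a documented firmware ABI:

	/* Inferred hdr_field layout:
	 *   bit 7 -> header descriptors enabled at all (tested in ibmvnic_xmit)
	 *   bit 6 -> include the L2 (Ethernet) header
	 *   bit 5 -> include the L3 (IP) header
	 *   bit 4 -> include the L4 (TCP/UDP) header
	 * e.g. hdr_field = 0x70 requests all three headers.
	 */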
+/**
+ * create_hdr_descs - create header and header extension descriptors
+ * @hdr_field - bitfield determining needed headers
+ * @hdr_data - buffer containing header data
+ * @len - length of data buffer
+ * @hdr_len - array of individual header lengths
+ * @scrq_arr - descriptor array
+ *
+ * Creates header and, if needed, header extension descriptors and
+ * places them in a descriptor array, scrq_arr
+ */
+
+static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+ union sub_crq *scrq_arr)
+{
+ union sub_crq hdr_desc;
+ int tmp_len = len;
+ u8 *data, *cur;
+ int tmp;
+
+ while (tmp_len > 0) {
+ cur = hdr_data + len - tmp_len;
+
+ memset(&hdr_desc, 0, sizeof(hdr_desc));
+ if (cur != hdr_data) {
+ data = hdr_desc.hdr_ext.data;
+ tmp = tmp_len > 29 ? 29 : tmp_len;
+ hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
+ hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+ hdr_desc.hdr_ext.len = tmp;
+ } else {
+ data = hdr_desc.hdr.data;
+ tmp = tmp_len > 24 ? 24 : tmp_len;
+ hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
+ hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
+ hdr_desc.hdr.len = tmp;
+ hdr_desc.hdr.l2_len = (u8)hdr_len[0];
+ hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+ hdr_desc.hdr.l4_len = (u8)hdr_len[2];
+ hdr_desc.hdr.flag = hdr_field << 1;
+ }
+ memcpy(data, cur, tmp);
+ tmp_len -= tmp;
+ *scrq_arr = hdr_desc;
+ scrq_arr++;
+ }
+}
+
+/**
+ * build_hdr_descs_arr - build a header descriptor array
+ * @txbuff - tx buffer containing the skb, header staging area and
+ * indirect descriptor array
+ * @num_entries - in/out count of descriptors to be sent
+ * @hdr_field - bit field determining which headers will be sent
+ *
+ * This function will build a TX descriptor array with applicable
+ * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
+ */
+
+static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+ int *num_entries, u8 hdr_field)
+{
+ int hdr_len[3] = {0, 0, 0};
+ int tot_len, len;
+ u8 *hdr_data = txbuff->hdr_data;
+
+ tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
+ txbuff->hdr_data);
+ len = tot_len;
+ len -= 24;
+ if (len > 0)
+ *num_entries += len % 29 ? len / 29 + 1 : len / 29;
+ create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+ txbuff->indir_arr + 1);
+}
+
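A worked example of the sizing above, using the limits from create_hdr_descs() (24 bytes fit in the first header descriptor, 29 in each extension):

	/* A plain TCP/IPv4 frame has 14 + 20 + 20 = 54 bytes of headers.
	 * The first header descriptor holds 24 bytes; the remaining 30
	 * need DIV_ROUND_UP(30, 29) = 2 extension descriptors, so three
	 * header descriptors follow the TX descriptor in indir_arr.
	 */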
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int queue_num = skb_get_queue_mapping(skb);
+ u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_tx_pool *tx_pool;
@@ -579,6 +715,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned long lpar_rc;
union sub_crq tx_crq;
unsigned int offset;
+ int num_entries = 1;
unsigned char *dst;
u64 *handle_array;
int index = 0;
@@ -644,11 +781,35 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL)
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
-
- lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
-
+ hdrs += 2;
+ }
+ /* determine if l2/3/4 headers are sent to firmware */
+ if ((*hdrs >> 7) & 1 &&
+ (skb->protocol == htons(ETH_P_IP) ||
+ skb->protocol == htons(ETH_P_IPV6))) {
+ build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
+ tx_crq.v1.n_crq_elem = num_entries;
+ tx_buff->indir_arr[0] = tx_crq;
+ tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
+ sizeof(tx_buff->indir_arr),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "tx: unable to map descriptor array\n");
+ tx_map_failed++;
+ tx_dropped++;
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+ lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+ (u64)tx_buff->indir_dma,
+ (u64)num_entries);
+ } else {
+ lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+ &tx_crq);
+ }
if (lpar_rc != H_SUCCESS) {
dev_err(dev, "tx failed with code %ld\n", lpar_rc);
@@ -832,7 +993,7 @@ restart_poll:
netdev->stats.rx_bytes += length;
frames_processed++;
}
- replenish_pools(adapter);
+ replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1054,12 +1215,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
goto reg_failed;
}
- scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
- dev_err(dev, "Error mapping irq\n");
- goto map_irq_failed;
- }
-
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
@@ -1072,12 +1227,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
return scrq;
-map_irq_failed:
- do {
- rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
- adapter->vdev->unit_address,
- scrq->crq_num);
- } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -1098,6 +1247,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
if (adapter->tx_scrq[i]) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
+ irq_dispose_mapping(adapter->tx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->tx_scrq[i]);
}
@@ -1109,6 +1259,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
if (adapter->rx_scrq[i]) {
free_irq(adapter->rx_scrq[i]->irq,
adapter->rx_scrq[i]);
+ irq_dispose_mapping(adapter->rx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->rx_scrq[i]);
}
@@ -1118,6 +1269,29 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
adapter->requested_caps = 0;
}
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->tx_scrq[i]);
+ adapter->tx_scrq = NULL;
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->rx_scrq[i]);
+ adapter->rx_scrq = NULL;
+ }
+
+ adapter->requested_caps = 0;
+}
+
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -1159,6 +1333,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
union sub_crq *next;
int index;
int i, j;
+ u8 first;
restart_loop:
while (pending_scrq(adapter, scrq)) {
@@ -1181,6 +1356,13 @@ restart_loop:
txbuff->data_dma[j] = 0;
txbuff->used_bounce = false;
}
+ /* if sub_crq was sent indirectly */
+ first = txbuff->indir_arr[0].generic.first;
+ if (first == IBMVNIC_CRQ_CMD) {
+ dma_unmap_single(dev, txbuff->indir_dma,
+ sizeof(txbuff->indir_arr),
+ DMA_TO_DEVICE);
+ }
if (txbuff->last_frag)
dev_kfree_skb_any(txbuff->skb);
@@ -1229,6 +1411,66 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
return IRQ_HANDLED;
}
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_sub_crq_queue *scrq;
+ int i = 0, j = 0;
+ int rc = 0;
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ scrq = adapter->tx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_tx_irq_failed;
+ }
+
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+ 0, "ibmvnic_tx", scrq);
+
+ if (rc) {
+ dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_rx_irq_failed;
+ }
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ scrq = adapter->rx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_rx_irq_failed;
+ }
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+ 0, "ibmvnic_rx", scrq);
+ if (rc) {
+ dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_rx_irq_failed;
+ }
+ }
+ return rc;
+
+req_rx_irq_failed:
+ for (j = 0; j < i; j++) {
+ free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+ irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
+ i = adapter->req_tx_queues;
+req_tx_irq_failed:
+ for (j = 0; j < i; j++) {
+ free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
+ }
+ release_sub_crqs_no_irqs(adapter);
+ return rc;
+}
+
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;
@@ -1237,8 +1479,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
union ibmvnic_crq crq;
int total_queues;
int more = 0;
- int i, j;
- int rc;
+ int i;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
@@ -1261,9 +1502,9 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
entries_page : adapter->max_rx_add_entries_per_subcrq;
/* Choosing the maximum number of queues supported by firmware*/
- adapter->req_tx_queues = adapter->min_tx_queues;
- adapter->req_rx_queues = adapter->min_rx_queues;
- adapter->req_rx_add_queues = adapter->min_rx_add_queues;
+ adapter->req_tx_queues = adapter->max_tx_queues;
+ adapter->req_rx_queues = adapter->max_rx_queues;
+ adapter->req_rx_add_queues = adapter->max_rx_add_queues;
adapter->req_mtu = adapter->max_mtu;
}
@@ -1317,13 +1558,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
for (i = 0; i < adapter->req_tx_queues; i++) {
adapter->tx_scrq[i] = allqueues[i];
adapter->tx_scrq[i]->pool_index = i;
- rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
- 0, "ibmvnic_tx", adapter->tx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
- adapter->tx_scrq[i]->irq, rc);
- goto req_tx_irq_failed;
- }
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -1334,13 +1568,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
for (i = 0; i < adapter->req_rx_queues; i++) {
adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
adapter->rx_scrq[i]->scrq_num = i;
- rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
- 0, "ibmvnic_rx", adapter->rx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
- adapter->rx_scrq[i]->irq, rc);
- goto req_rx_irq_failed;
- }
}
memset(&crq, 0, sizeof(crq));
@@ -1393,15 +1620,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
return;
-req_rx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
- i = adapter->req_tx_queues;
-req_tx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- kfree(adapter->rx_scrq);
- adapter->rx_scrq = NULL;
rx_failed:
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
@@ -1494,6 +1712,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
return rc;
}
+static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
+ u64 remote_handle, u64 ioba, u64 num_entries)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ /* Make sure the hypervisor sees the complete request */
+ mb();
+ rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
+ cpu_to_be64(remote_handle),
+ ioba, num_entries);
+
+ if (rc) {
+ if (rc == H_CLOSED)
+ dev_warn(dev, "CRQ Queue closed\n");
+ dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
+ }
+
+ return rc;
+}
+
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
union ibmvnic_crq *crq)
{
@@ -1589,13 +1829,11 @@ static void send_login(struct ibmvnic_adapter *adapter)
goto buf_map_failed;
}
- rsp_buffer_size =
- sizeof(struct ibmvnic_login_rsp_buffer) +
- sizeof(u64) * (adapter->req_tx_queues +
- adapter->req_rx_queues *
- adapter->req_rx_add_queues + adapter->
- req_rx_add_queues) +
- sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS);
+ rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
+ sizeof(u64) * adapter->req_tx_queues +
+ sizeof(u64) * adapter->req_rx_queues +
+ sizeof(u64) * adapter->req_rx_queues +
+ sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
if (!login_rsp_buffer)
@@ -1918,6 +2156,10 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+ if ((adapter->netdev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+ adapter->netdev->features |= NETIF_F_RXCSUM;
+
memset(&crq, 0, sizeof(crq));
crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
@@ -1931,7 +2173,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp;
unsigned long flags;
bool found = false;
int i;
@@ -1943,7 +2185,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
}
spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry(error_buff, &adapter->errors, list)
+ list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
if (error_buff->error_id == crq->request_error_rsp.error_id) {
found = true;
list_del(&error_buff->list);
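The list_for_each_entry_safe() conversions in this file matter because the loop bodies delete the current entry; a minimal sketch of the pattern (error_id and the kfree are illustrative):

	struct ibmvnic_error_buff *pos, *tmp;

	/* _safe caches pos->list.next in tmp before the body runs, so
	 * list_del(&pos->list) cannot leave the iterator dangling.
	 */
	list_for_each_entry_safe(pos, tmp, &adapter->errors, list) {
		if (pos->error_id == error_id) {
			list_del(&pos->list);
			kfree(pos);
		}
	}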
@@ -2158,9 +2400,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be32_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs(adapter);
+ release_sub_crqs_no_irqs(adapter);
*req_value = be32_to_cpu(crq->request_capability_rsp.number);
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
@@ -2210,6 +2452,16 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
dma_unmap_single(dev, adapter->login_rsp_buf_token,
adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
+ /* If the number of queues requested can't be allocated by the
+ * server, the login response will return with code 1. We will need
+ * to resend the login buffer with fewer queues requested.
+ */
+ if (login_rsp_crq->generic.rc.code) {
+ adapter->renegotiate = true;
+ complete(&adapter->init_done);
+ return 0;
+ }
+
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
@@ -2459,7 +2711,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
out:
if (atomic_read(&adapter->running_cap_queries) == 0)
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 0);
/* We're done querying the capabilities, initialize sub-crqs */
}
@@ -2941,14 +3193,14 @@ static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
- struct ibmvnic_inflight_cmd *inflight_cmd;
+ struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp2;
unsigned long flags;
unsigned long flags2;
spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+ list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
switch (inflight_cmd->crq.generic.cmd) {
case LOGIN:
dma_unmap_single(dev, adapter->login_buf_token,
@@ -2965,8 +3217,8 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
break;
case REQUEST_ERROR_INFO:
spin_lock_irqsave(&adapter->error_list_lock, flags2);
- list_for_each_entry(error_buff, &adapter->errors,
- list) {
+ list_for_each_entry_safe(error_buff, tmp2,
+ &adapter->errors, list) {
dma_unmap_single(dev, error_buff->dma,
error_buff->len,
DMA_FROM_DEVICE);
@@ -3002,8 +3254,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dev_info(dev, "Partner initialized\n");
/* Send back a response */
rc = ibmvnic_send_crq_init_complete(adapter);
- if (rc == 0)
- send_version_xchg(adapter);
+ if (!rc)
+ schedule_work(&adapter->vnic_crq_init);
else
dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
break;
@@ -3355,8 +3607,63 @@ static const struct file_operations ibmvnic_dump_ops = {
.release = single_release,
};
+static void handle_crq_init_rsp(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ vnic_crq_init);
+ struct device *dev = &adapter->vdev->dev;
+ struct net_device *netdev = adapter->netdev;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ int rc;
+
+ send_version_xchg(adapter);
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
+ send_cap_queries(adapter);
+
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+ }
+ } while (adapter->renegotiate);
+ rc = init_sub_crq_irqs(adapter);
+
+ if (rc)
+ goto task_failed;
+
+ netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(dev,
+ "failed to register netdev rc=%d\n", rc);
+ goto register_failed;
+ }
+ dev_info(dev, "ibmvnic registered\n");
+
+ return;
+
+register_failed:
+ release_sub_crqs(adapter);
+task_failed:
+ dev_err(dev, "Passive initialization was not successful\n");
+}
+
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
+ unsigned long timeout = msecs_to_jiffies(30000);
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
@@ -3393,6 +3700,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
+ INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+
spin_lock_init(&adapter->stats_lock);
rc = ibmvnic_init_crq_queue(adapter);
@@ -3435,23 +3744,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
ibmvnic_send_crq_init(adapter);
init_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
-
- /* needed to pull init_sub_crqs outside of an interrupt context
- * because it creates IRQ mappings for the subCRQ queues, causing
- * a kernel warning
- */
- init_sub_crqs(adapter, 0);
-
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+ return 0;
- /* if init_sub_crqs is partially successful, retry */
- while (!adapter->tx_scrq || !adapter->rx_scrq) {
- init_sub_crqs(adapter, 1);
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
+ send_cap_queries(adapter);
+
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout))
+ return 0;
+ }
+ } while (adapter->renegotiate);
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+ goto free_debugfs;
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
@@ -3459,12 +3771,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto free_debugfs;
+ goto free_sub_crqs;
}
dev_info(&dev->dev, "ibmvnic registered\n");
return 0;
+free_sub_crqs:
+ release_sub_crqs(adapter);
free_debugfs:
if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
debugfs_remove_recursive(adapter->debugfs_dir);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1a9993cc7..e82898fd5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -879,6 +879,9 @@ struct ibmvnic_tx_buff {
int pool_index;
bool last_frag;
bool used_bounce;
+ union sub_crq indir_arr[6];
+ u8 hdr_data[140];
+ dma_addr_t indir_dma;
};
struct ibmvnic_tx_pool {
@@ -977,6 +980,7 @@ struct ibmvnic_adapter {
struct ibmvnic_sub_crq_queue **tx_scrq;
struct ibmvnic_sub_crq_queue **rx_scrq;
int requested_caps;
+ bool renegotiate;
/* rx structs */
struct napi_struct *napi;
@@ -1041,4 +1045,6 @@ struct ibmvnic_adapter {
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
+
+ struct work_struct vnic_crq_init;
};
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 3772f3ac9..714bd1014 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -25,16 +25,13 @@ config E100
on the adapter. Look for a label that has a barcode and a number
in the format 123456-001 (six digits hyphen three digits).
- Use the above information and the Adapter & Driver ID Guide at:
+ Use the above information and the Adapter & Driver ID Guide that
+ can be located at:
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+ <http://support.intel.com>
to identify the adapter.
- For the latest Intel PRO/100 network driver for Linux, see:
-
- <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
-
More specific information on configuring the driver is in
<file:Documentation/networking/e100.txt>.
@@ -47,12 +44,7 @@ config E1000
---help---
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -71,12 +63,8 @@ config E1000E
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
use the regular e1000 driver For more information on how to
- identify your adapter, go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ identify your adapter, go to the Adapter & Driver ID Guide that
+ can be located at:
<http://support.intel.com>
@@ -101,12 +89,7 @@ config IGB
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -142,12 +125,7 @@ config IGBVF
---help---
This driver supports Intel(R) 82576 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -164,12 +142,7 @@ config IXGB
This driver supports Intel(R) PRO/10GbE family of adapters for
PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
instead. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -187,12 +160,7 @@ config IXGBE
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -243,12 +211,7 @@ config IXGBEVF
---help---
This driver supports Intel(R) PCI Express virtual functions for the
Intel(R) ixgbe driver. For more information on how to identify your
- adapter, go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ adapter, go to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -266,12 +229,7 @@ config I40E
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
devices. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -326,12 +284,7 @@ config I40EVF
---help---
This driver supports Intel(R) XL710 and X710 virtual functions.
For more information on how to identify your adapter, go to the
- Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -347,12 +300,7 @@ config FM10K
---help---
This driver supports Intel(R) FM10000 Ethernet Switch Host
Interface. For more information on how to identify your adapter,
- go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ go to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 98fe5a2cd..d7bdea79e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -358,6 +358,8 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
+int e1000_open(struct net_device *netdev);
+int e1000_close(struct net_device *netdev);
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 83e557c7f..975eeb885 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1553,7 +1553,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ e1000_close(netdev);
else
e1000_reset(adapter);
@@ -1582,7 +1582,7 @@ static void e1000_diag_test(struct net_device *netdev,
e1000_reset(adapter);
clear_bit(__E1000_TESTING, &adapter->flags);
if (if_running)
- dev_open(netdev);
+ e1000_open(netdev);
} else {
e_info(hw, "online testing starting\n");
/* Online tests */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index ae90d4f12..f42129d09 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -114,8 +114,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
-static int e1000_open(struct net_device *netdev);
-static int e1000_close(struct net_device *netdev);
+int e1000_open(struct net_device *netdev);
+int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
@@ -1360,7 +1360,7 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
-static int e1000_open(struct net_device *netdev)
+int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -1437,7 +1437,7 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int e1000_close(struct net_device *netdev)
+int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 2af603f3e..cd3913760 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -121,7 +121,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
return 0;
}
@@ -845,27 +845,27 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
reg &= ~(0xF << 27); /* 30:27 */
if (hw->phy.media_type != e1000_media_type_copper)
- reg &= ~(1 << 20);
+ reg &= ~BIT(20);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
+ reg |= BIT(28);
ew32(TARC(1), reg);
/* Disable IPv6 extension header parsing because some malformed
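The (1 << n) -> BIT(n) conversions here and in the e1000e files below use the standard kernel macro; besides readability, BIT(31) avoids shifting into the sign bit of a signed int:

	/* include/linux/bitops.h */
	#define BIT(nr)			(1UL << (nr))

	/* (1 << 31) on a 32-bit signed int is undefined behaviour;
	 * BIT(31) evaluates in unsigned long and is well defined.
	 */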
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 5f7016442..7fd4d5459 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -185,7 +185,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
break;
}
@@ -1163,12 +1163,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
@@ -1177,11 +1177,11 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+ reg |= BIT(23) | BIT(24) | BIT(25) | BIT(26);
break;
case e1000_82574:
case e1000_82583:
- reg |= (1 << 26);
+ reg |= BIT(26);
break;
default:
break;
@@ -1193,12 +1193,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- reg &= ~((1 << 29) | (1 << 30));
- reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+ reg &= ~(BIT(29) | BIT(30));
+ reg |= BIT(22) | BIT(24) | BIT(25) | BIT(26);
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
+ reg |= BIT(28);
ew32(TARC(1), reg);
break;
default:
@@ -1211,7 +1211,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(CTRL);
- reg &= ~(1 << 29);
+ reg &= ~BIT(29);
ew32(CTRL, reg);
break;
default:
@@ -1224,8 +1224,8 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(CTRL_EXT);
- reg &= ~(1 << 23);
- reg |= (1 << 22);
+ reg &= ~BIT(23);
+ reg |= BIT(22);
ew32(CTRL_EXT, reg);
break;
default:
@@ -1261,7 +1261,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(GCR);
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(GCR, reg);
/* Workaround for hardware errata.
@@ -1308,8 +1308,8 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
E1000_VFTA_ENTRY_SHIFT) &
E1000_VFTA_ENTRY_MASK;
vfta_bit_in_reg =
- 1 << (hw->mng_cookie.vlan_id &
- E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ BIT(hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
break;
default:
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1dc293bad..ef96cd11d 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -109,18 +109,18 @@ struct e1000_info;
#define E1000_TXDCTL_DMA_BURST_ENABLE \
(E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
E1000_TXDCTL_COUNT_DESC | \
- (1 << 16) | /* wthresh must be +1 more than desired */\
- (1 << 8) | /* hthresh */ \
- 0x1f) /* pthresh */
+ (1u << 16) | /* wthresh must be +1 more than desired */\
+ (1u << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
#define E1000_RXDCTL_DMA_BURST_ENABLE \
(0x01000000 | /* set descriptor granularity */ \
- (4 << 16) | /* set writeback threshold */ \
- (4 << 8) | /* set prefetch threshold */ \
+ (4u << 16) | /* set writeback threshold */ \
+ (4u << 8) | /* set prefetch threshold */ \
0x20) /* set hthresh */
-#define E1000_TIDV_FPD (1 << 31)
-#define E1000_RDTR_FPD (1 << 31)
+#define E1000_TIDV_FPD BIT(31)
+#define E1000_RDTR_FPD BIT(31)
enum e1000_boards {
board_82571,
@@ -347,6 +347,7 @@ struct e1000_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct pm_qos_request pm_qos_req;
+ s32 ptp_delta;
u16 eee_advert;
};
@@ -404,53 +405,53 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
/* hardware capability, feature, and workaround flags */
-#define FLAG_HAS_AMT (1 << 0)
-#define FLAG_HAS_FLASH (1 << 1)
-#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
-#define FLAG_HAS_WOL (1 << 3)
-/* reserved bit4 */
-#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
-#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
-#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
-#define FLAG_READ_ONLY_NVM (1 << 8)
-#define FLAG_IS_ICH (1 << 9)
-#define FLAG_HAS_MSIX (1 << 10)
-#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
-#define FLAG_IS_QUAD_PORT_A (1 << 12)
-#define FLAG_IS_QUAD_PORT (1 << 13)
-#define FLAG_HAS_HW_TIMESTAMP (1 << 14)
-#define FLAG_APME_IN_WUC (1 << 15)
-#define FLAG_APME_IN_CTRL3 (1 << 16)
-#define FLAG_APME_CHECK_PORT_B (1 << 17)
-#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
-#define FLAG_NO_WAKE_UCAST (1 << 19)
-#define FLAG_MNG_PT_ENABLED (1 << 20)
-#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
-#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
-#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
-#define FLAG_RX_NEEDS_RESTART (1 << 24)
-#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
-#define FLAG_SMART_POWER_DOWN (1 << 26)
-#define FLAG_MSI_ENABLED (1 << 27)
-/* reserved (1 << 28) */
-#define FLAG_TSO_FORCE (1 << 29)
-#define FLAG_RESTART_NOW (1 << 30)
-#define FLAG_MSI_TEST_FAILED (1 << 31)
-
-#define FLAG2_CRC_STRIPPING (1 << 0)
-#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
-#define FLAG2_IS_DISCARDING (1 << 2)
-#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
-#define FLAG2_HAS_PHY_STATS (1 << 4)
-#define FLAG2_HAS_EEE (1 << 5)
-#define FLAG2_DMA_BURST (1 << 6)
-#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
-#define FLAG2_DISABLE_AIM (1 << 8)
-#define FLAG2_CHECK_PHY_HANG (1 << 9)
-#define FLAG2_NO_DISABLE_RX (1 << 10)
-#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
-#define FLAG2_DFLT_CRC_STRIPPING (1 << 12)
-#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13)
+#define FLAG_HAS_AMT BIT(0)
+#define FLAG_HAS_FLASH BIT(1)
+#define FLAG_HAS_HW_VLAN_FILTER BIT(2)
+#define FLAG_HAS_WOL BIT(3)
+/* reserved BIT(4) */
+#define FLAG_HAS_CTRLEXT_ON_LOAD BIT(5)
+#define FLAG_HAS_SWSM_ON_LOAD BIT(6)
+#define FLAG_HAS_JUMBO_FRAMES BIT(7)
+#define FLAG_READ_ONLY_NVM BIT(8)
+#define FLAG_IS_ICH BIT(9)
+#define FLAG_HAS_MSIX BIT(10)
+#define FLAG_HAS_SMART_POWER_DOWN BIT(11)
+#define FLAG_IS_QUAD_PORT_A BIT(12)
+#define FLAG_IS_QUAD_PORT BIT(13)
+#define FLAG_HAS_HW_TIMESTAMP BIT(14)
+#define FLAG_APME_IN_WUC BIT(15)
+#define FLAG_APME_IN_CTRL3 BIT(16)
+#define FLAG_APME_CHECK_PORT_B BIT(17)
+#define FLAG_DISABLE_FC_PAUSE_TIME BIT(18)
+#define FLAG_NO_WAKE_UCAST BIT(19)
+#define FLAG_MNG_PT_ENABLED BIT(20)
+#define FLAG_RESET_OVERWRITES_LAA BIT(21)
+#define FLAG_TARC_SPEED_MODE_BIT BIT(22)
+#define FLAG_TARC_SET_BIT_ZERO BIT(23)
+#define FLAG_RX_NEEDS_RESTART BIT(24)
+#define FLAG_LSC_GIG_SPEED_DROP BIT(25)
+#define FLAG_SMART_POWER_DOWN BIT(26)
+#define FLAG_MSI_ENABLED BIT(27)
+/* reserved BIT(28) */
+#define FLAG_TSO_FORCE BIT(29)
+#define FLAG_RESTART_NOW BIT(30)
+#define FLAG_MSI_TEST_FAILED BIT(31)
+
+#define FLAG2_CRC_STRIPPING BIT(0)
+#define FLAG2_HAS_PHY_WAKEUP BIT(1)
+#define FLAG2_IS_DISCARDING BIT(2)
+#define FLAG2_DISABLE_ASPM_L1 BIT(3)
+#define FLAG2_HAS_PHY_STATS BIT(4)
+#define FLAG2_HAS_EEE BIT(5)
+#define FLAG2_DMA_BURST BIT(6)
+#define FLAG2_DISABLE_ASPM_L0S BIT(7)
+#define FLAG2_DISABLE_AIM BIT(8)
+#define FLAG2_CHECK_PHY_HANG BIT(9)
+#define FLAG2_NO_DISABLE_RX BIT(10)
+#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11)
+#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
+#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@@ -480,6 +481,8 @@ extern const char e1000e_driver_version[];
void e1000e_check_options(struct e1000_adapter *adapter);
void e1000e_set_ethtool_ops(struct net_device *netdev);
+int e1000e_open(struct net_device *netdev);
+int e1000e_close(struct net_device *netdev);
void e1000e_up(struct e1000_adapter *adapter);
void e1000e_down(struct e1000_adapter *adapter, bool reset);
void e1000e_reinit_locked(struct e1000_adapter *adapter);
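The new e1000e_open()/e1000e_close() prototypes let other compilation units bounce the interface without going through dev_open()/dev_close() (see the ethtool.c hunk below, where the offline self-test is switched over), so the netdev's administrative state stays untouched while the hardware is reset. A hedged, self-contained sketch of that pattern; the stand-in bodies are illustrative, not the driver's:

#include <stdio.h>

struct net_device { int hw_up; };

/* stand-ins for the driver-internal open/close declared above */
static int e1000e_open(struct net_device *nd)  { nd->hw_up = 1; return 0; }
static int e1000e_close(struct net_device *nd) { nd->hw_up = 0; return 0; }

static void run_offline_test(struct net_device *nd, int if_running)
{
	if (if_running)
		e1000e_close(nd);	/* not dev_close(): no admin-down */

	puts("running register/interrupt/loopback tests");

	if (if_running)
		e1000e_open(nd);	/* not dev_open() */
}

int main(void)
{
	struct net_device nd = { .hw_up = 1 };

	run_offline_test(&nd, 1);
	return 0;
}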
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 6cab1f30d..7aff68a4a 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -201,6 +201,9 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
return 0;
}
@@ -236,8 +239,13 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
mac->forced_speed_duplex = ADVERTISE_100_FULL;
break;
case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = 1;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised =
+ ADVERTISE_1000_FULL;
+ } else {
+ mac->forced_speed_duplex = ADVERTISE_1000_FULL;
+ }
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
@@ -439,8 +447,9 @@ static void e1000_get_regs(struct net_device *netdev,
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
- adapter->pdev->device;
+ regs->version = (1u << 24) |
+ (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@@ -895,7 +904,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
- mask |= (1 << 18);
+ mask |= BIT(18);
break;
default:
break;
@@ -914,9 +923,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
/* SHRAH[9] different than the others */
if (i == 10)
- mask |= (1 << 30);
+ mask |= BIT(30);
else
- mask &= ~(1 << 30);
+ mask &= ~BIT(30);
}
if (mac->type == e1000_pch2lan) {
/* SHRAH[0,1,2] different than previous */
@@ -924,7 +933,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
mask &= 0xFFF4FFFF;
/* SHRAH[3] different than SHRAH[0,1,2] */
if (i == 4)
- mask |= (1 << 30);
+ mask |= BIT(30);
/* RAR[1-6] owned by management engine - skipping */
if (i > 0)
i += 6;
@@ -1019,7 +1028,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Test each interrupt */
for (i = 0; i < 10; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (adapter->flags & FLAG_IS_ICH) {
switch (mask) {
@@ -1387,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
case e1000_phy_82579:
/* Disable PHY energy detect power down */
e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
- e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+ e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3));
/* Disable full chip energy detect */
e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
@@ -1453,7 +1462,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* disable autoneg */
ctrl = er32(TXCW);
- ctrl &= ~(1 << 31);
+ ctrl &= ~BIT(31);
ew32(TXCW, ctrl);
link = (er32(STATUS) & E1000_STATUS_LU);
@@ -1816,7 +1825,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ e1000e_close(netdev);
if (e1000_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1849,7 +1858,7 @@ static void e1000_diag_test(struct net_device *netdev,
clear_bit(__E1000_TESTING, &adapter->state);
if (if_running)
- dev_open(netdev);
+ e1000e_open(netdev);
} else {
/* Online tests */
@@ -2283,19 +2292,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_ALL));
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_ALL));
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
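get_ts_info() builds its capability words by using the HWTSTAMP_* enumerators as bit indices, so user space can probe support with the same shifts. A sketch with an illustrative enum subset (the real UAPI values differ):

#include <stdio.h>

#define BIT(nr) (1UL << (nr))

enum hwtstamp_rx_filters {		/* illustrative subset, not UAPI */
	HWTSTAMP_FILTER_NONE,
	HWTSTAMP_FILTER_ALL,
	HWTSTAMP_FILTER_PTP_V2_EVENT,
};

int main(void)
{
	unsigned long rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
				   BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);

	printf("v2 events supported: %s\n",
	       rx_filters & BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) ? "yes" : "no");
	return 0;
}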
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index c0f4887ea..3e11322d8 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1048,7 +1048,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
while (value > PCI_LTR_VALUE_MASK) {
scale++;
- value = DIV_ROUND_UP(value, (1 << 5));
+ value = DIV_ROUND_UP(value, BIT(5));
}
if (scale > E1000_LTRV_SCALE_MAX) {
e_dbg("Invalid LTR latency scale %d\n", scale);
@@ -1573,7 +1573,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
- phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+ phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
break;
@@ -2044,9 +2044,9 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
/* Restore SMBus frequency */
if (freq--) {
phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
- phy_data |= (freq & (1 << 0)) <<
+ phy_data |= (freq & BIT(0)) <<
HV_SMB_ADDR_FREQ_LOW_SHIFT;
- phy_data |= (freq & (1 << 1)) <<
+ phy_data |= (freq & BIT(1)) <<
(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
} else {
e_dbg("Unsupported SMB frequency in PHY\n");
@@ -2530,7 +2530,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
/* disable Rx path while enabling/disabling workaround */
e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
if (ret_val)
return ret_val;
@@ -2561,7 +2561,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
/* Enable jumbo frame workaround in the MAC */
mac_reg = er32(FFLT_DBG);
- mac_reg &= ~(1 << 14);
+ mac_reg &= ~BIT(14);
mac_reg |= (7 << 15);
ew32(FFLT_DBG, mac_reg);
@@ -2576,7 +2576,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
- data | (1 << 0));
+ data | BIT(0));
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
@@ -2600,7 +2600,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data &= ~(1 << 13);
+ data &= ~BIT(13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
return ret_val;
@@ -2614,7 +2614,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, HV_PM_CTRL, &data);
- ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
if (ret_val)
return ret_val;
} else {
@@ -2634,7 +2634,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
- data & ~(1 << 0));
+ data & ~BIT(0));
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
@@ -2657,7 +2657,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data |= (1 << 13);
+ data |= BIT(13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
return ret_val;
@@ -2671,13 +2671,13 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, HV_PM_CTRL, &data);
- ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
if (ret_val)
return ret_val;
}
/* re-enable Rx path after enabling/disabling workaround */
- return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+ return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
}
/**
@@ -4841,7 +4841,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Extended Device Control */
reg = er32(CTRL_EXT);
- reg |= (1 << 22);
+ reg |= BIT(22);
/* Enable PHY low-power state when MAC is at D3 w/o WoL */
if (hw->mac.type >= e1000_pchlan)
reg |= E1000_CTRL_EXT_PHYPDEN;
@@ -4849,34 +4849,34 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
if (hw->mac.type == e1000_ich8lan)
- reg |= (1 << 28) | (1 << 29);
- reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+ reg |= BIT(28) | BIT(29);
+ reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
- reg |= (1 << 24) | (1 << 26) | (1 << 30);
+ reg |= BIT(28);
+ reg |= BIT(24) | BIT(26) | BIT(30);
ew32(TARC(1), reg);
/* Device Status */
if (hw->mac.type == e1000_ich8lan) {
reg = er32(STATUS);
- reg &= ~(1 << 31);
+ reg &= ~BIT(31);
ew32(STATUS, reg);
}
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2311f6003..67163ca89 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -73,10 +73,10 @@
(ID_LED_OFF1_ON2 << 4) | \
(ID_LED_DEF1_DEF2))
-#define E1000_ICH_NVM_SIG_WORD 0x13
-#define E1000_ICH_NVM_SIG_MASK 0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
-#define E1000_ICH_NVM_SIG_VALUE 0x80
+#define E1000_ICH_NVM_SIG_WORD 0x13u
+#define E1000_ICH_NVM_SIG_MASK 0xC000u
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0u
+#define E1000_ICH_NVM_SIG_VALUE 0x80u
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index e59d7c283..b322011ec 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -346,7 +346,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
mc_addr_list += (ETH_ALEN);
}
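The mac.c hunk touches the multicast hash update, which splits one hash value into a register index (upper bits, masked by the table size) and a bit position (low 5 bits, one per bit of a 32-bit MTA register). A standalone sketch of that split; the table size is illustrative:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr)		(1UL << (nr))
#define MTA_REG_COUNT	128	/* illustrative: 128 x 32-bit registers */

static uint32_t mta_shadow[MTA_REG_COUNT];

/* mirrors the update in e1000e_update_mc_addr_list_generic() */
static void mta_set(uint32_t hash_value)
{
	uint32_t hash_reg = (hash_value >> 5) & (MTA_REG_COUNT - 1);
	uint32_t hash_bit = hash_value & 0x1F;

	mta_shadow[hash_reg] |= BIT(hash_bit);
}

int main(void)
{
	uint32_t hash = 0x0ABC;

	mta_set(hash);
	printf("reg %u = 0x%08x\n", (hash >> 5) & (MTA_REG_COUNT - 1),
	       mta_shadow[(hash >> 5) & (MTA_REG_COUNT - 1)]);
	return 0;
}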
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9b4ec13d9..2b2e2f8c6 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -242,7 +242,7 @@ static void e1000e_dump(struct e1000_adapter *adapter)
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, netdev->trans_start, netdev->last_rx);
+ netdev->state, dev_trans_start(netdev), netdev->last_rx);
}
/* Print Registers */
@@ -317,8 +317,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
else
next_desc = "";
pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
- (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+ (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
i,
(unsigned long long)le64_to_cpu(u0->a),
(unsigned long long)le64_to_cpu(u0->b),
@@ -2018,7 +2018,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
adapter->eiac_mask |= E1000_IMS_OTHER;
/* Cause Tx interrupts on every write back */
- ivar |= (1 << 31);
+ ivar |= BIT(31);
ew32(IVAR, ivar);
@@ -2709,7 +2709,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev,
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
- vfta |= (1 << (vid & 0x1F));
+ vfta |= BIT((vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
@@ -2737,7 +2737,7 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
- vfta &= ~(1 << (vid & 0x1F));
+ vfta &= ~BIT((vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
@@ -2789,7 +2789,7 @@ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
}
/**
- * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
@@ -2878,7 +2878,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
/* Enable this decision filter in MANC2H */
if (mdef)
- manc2h |= (1 << i);
+ manc2h |= BIT(i);
j |= mdef;
}
@@ -2891,7 +2891,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
if (er32(MDEF(i)) == 0) {
ew32(MDEF(i), (E1000_MDEF_PORT_623 |
E1000_MDEF_PORT_664));
- manc2h |= (1 << 1);
+ manc2h |= BIT(1);
j++;
break;
}
@@ -2971,7 +2971,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* set the speed mode bit, we'll clear it if we're not at
* gigabit link later
*/
-#define SPEED_MODE_BIT (1 << 21)
+#define SPEED_MODE_BIT BIT(21)
tarc |= SPEED_MODE_BIT;
ew32(TARC(0), tarc);
}
@@ -3071,12 +3071,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
phy_data &= 0xfff8;
- phy_data |= (1 << 2);
+ phy_data |= BIT(2);
e1e_wphy(hw, PHY_REG(770, 26), phy_data);
e1e_rphy(hw, 22, &phy_data);
phy_data &= 0x0fff;
- phy_data |= (1 << 14);
+ phy_data |= BIT(14);
e1e_wphy(hw, 0x10, 0x2823);
e1e_wphy(hw, 0x11, 0x0003);
e1e_wphy(hw, 22, phy_data);
@@ -3368,12 +3368,12 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
- int rval;
+ int ret_val;
if (!rar_entries)
break;
- rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
- if (rval < 0)
+ ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ if (ret_val < 0)
return -ENOMEM;
count++;
}
@@ -3503,8 +3503,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
!(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
u32 fextnvm7 = er32(FEXTNVM7);
- if (!(fextnvm7 & (1 << 0))) {
- ew32(FEXTNVM7, fextnvm7 | (1 << 0));
+ if (!(fextnvm7 & BIT(0))) {
+ ew32(FEXTNVM7, fextnvm7 | BIT(0));
e1e_flush();
}
}
@@ -3580,7 +3580,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
bool is_l4 = false;
bool is_l2 = false;
u32 regval;
- s32 ret_val;
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return -EINVAL;
@@ -3719,16 +3718,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
er32(RXSTMPH);
er32(TXSTMPH);
- /* Get and set the System Time Register SYSTIM base frequency */
- ret_val = e1000e_get_base_timinca(adapter, &regval);
- if (ret_val)
- return ret_val;
- ew32(TIMINCA, regval);
-
- /* reset the ns time counter */
- timecounter_init(&adapter->tc, &adapter->cc,
- ktime_to_ns(ktime_get_real()));
-
return 0;
}
@@ -3839,7 +3828,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
/* update thresholds: prefetch threshold to 31, host threshold to 1
* and make sure the granularity is "descriptors" and not "cache lines"
*/
- rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+ rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC);
ew32(RXDCTL(0), rxdctl);
/* momentarily enable the RX ring for the changes to take effect */
@@ -3885,6 +3874,53 @@ static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
}
/**
+ * e1000e_systim_reset - reset the timesync registers after a hardware reset
+ * @adapter: board private structure
+ *
+ * When the MAC is reset, all hardware bits for timesync will be reset to the
+ * default values. This function will restore the settings that were last in place.
+ * Since the clock SYSTIME registers are reset, we will simply restore the
+ * cyclecounter to the kernel real clock time.
+ **/
+static void e1000e_systim_reset(struct e1000_adapter *adapter)
+{
+ struct ptp_clock_info *info = &adapter->ptp_clock_info;
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
+ u32 timinca;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ if (info->adjfreq) {
+ /* restore the previous ptp frequency delta */
+ ret_val = info->adjfreq(info, adapter->ptp_delta);
+ } else {
+ /* set the default base frequency if no adjustment possible */
+ ret_val = e1000e_get_base_timinca(adapter, &timinca);
+ if (!ret_val)
+ ew32(TIMINCA, timinca);
+ }
+
+ if (ret_val) {
+ dev_warn(&adapter->pdev->dev,
+ "Failed to restore TIMINCA clock rate delta: %d\n",
+ ret_val);
+ return;
+ }
+
+ /* reset the systim ns time counter */
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ /* restore the previous hwtstamp configuration settings */
+ e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+}
+
+/**
* e1000e_reset - bring the hardware into a known good state
*
* This function boots the hardware and enables some settings that
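e1000e_systim_reset() restores state in a deliberate order: clock rate first (the cached ptp_delta, or the base TIMINCA value when no adjfreq exists), then the software timecounter, then the saved hwtstamp configuration. A condensed userspace model of that ordering; all fields are stand-ins for the hardware state:

#include <stdio.h>
#include <time.h>

struct model {
	long rate_ppb;		/* stands in for TIMINCA */
	long long ns_base;	/* stands in for the timecounter */
	int hwtstamp_on;	/* stands in for hwtstamp_config */
};

static void systim_reset(struct model *m, long saved_delta, int saved_cfg)
{
	/* 1) restore the rate before re-seeding the counter; doing it
	 * the other way round would seed against the post-reset default
	 */
	m->rate_ppb = saved_delta;
	/* 2) re-seed the counter from wall-clock time */
	m->ns_base = (long long)time(NULL) * 1000000000LL;
	/* 3) replay the cached timestamping configuration */
	m->hwtstamp_on = saved_cfg;
}

int main(void)
{
	struct model m = { 0, 0, 0 };

	systim_reset(&m, 123, 1);
	printf("rate=%ld ppb, base=%lld ns, ts=%d\n",
	       m.rate_ppb, m.ns_base, m.hwtstamp_on);
	return 0;
}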
@@ -4063,8 +4099,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000e_reset_adaptive(hw);
- /* initialize systim and reset the ns time counter */
- e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+ /* restore systim and hwtstamp settings */
+ e1000e_systim_reset(adapter);
/* Set EEE advertisement as appropriate */
if (adapter->flags2 & FLAG2_HAS_EEE) {
@@ -4275,7 +4311,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
- u32 systimel_1, systimel_2, systimeh;
+ u32 systimel, systimeh;
cycle_t systim, systim_next;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
@@ -4283,24 +4319,25 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
* will experience a huge non linear increment in the systime value
* to fix that we test for overflow and if true, we re-read systime.
*/
- systimel_1 = er32(SYSTIML);
+ systimel = er32(SYSTIML);
systimeh = er32(SYSTIMH);
- systimel_2 = er32(SYSTIML);
- /* Check for overflow. If there was no overflow, use the values */
- if (systimel_1 < systimel_2) {
- systim = (cycle_t)systimel_1;
- systim |= (cycle_t)systimeh << 32;
- } else {
- /* There was an overflow, read again SYSTIMH, and use
- * systimel_2
- */
- systimeh = er32(SYSTIMH);
- systim = (cycle_t)systimel_2;
- systim |= (cycle_t)systimeh << 32;
+ /* Is systimel so large that overflow is possible? */
+ if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
+ u32 systimel_2 = er32(SYSTIML);
+ if (systimel > systimel_2) {
+ /* There was an overflow; read SYSTIMH again and use
+ * systimel_2
+ */
+ systimeh = er32(SYSTIMH);
+ systimel = systimel_2;
+ }
}
+ systim = (cycle_t)systimel;
+ systim |= (cycle_t)systimeh << 32;
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
- u64 incvalue, time_delta, rem, temp;
+ u64 time_delta, rem, temp;
+ u32 incvalue;
int i;
/* errata for 82574/82583 possible bad bits read from SYSTIMH/L
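The rewritten cyclecounter read avoids a second SYSTIML access in the common case: only when the low word is within one maximum increment of wrapping can an overflow slip between the two register reads, and only then is it re-read. A standalone model of the technique (the mask value is illustrative, standing in for E1000_TIMINCA_INCVALUE_MASK):

#include <stdint.h>
#include <stdio.h>

#define INCVALUE_MASK 0x00FFFFFFu	/* illustrative max increment */

static uint32_t fake_low, fake_high;	/* stand-ins for SYSTIML/SYSTIMH */
static uint32_t rd_low(void)  { return fake_low; }
static uint32_t rd_high(void) { return fake_high; }

static uint64_t read_systim(void)
{
	uint32_t lo = rd_low();
	uint32_t hi = rd_high();

	/* only re-read when lo could have wrapped between the reads */
	if (lo >= (uint32_t)0xffffffff - INCVALUE_MASK) {
		uint32_t lo2 = rd_low();

		if (lo > lo2) {		/* low half wrapped in between */
			hi = rd_high();
			lo = lo2;
		}
	}
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	fake_low = 0xFFFFFFF0u;
	fake_high = 7;
	printf("systim = 0x%016llx\n", (unsigned long long)read_systim());
	return 0;
}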
@@ -4495,7 +4532,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
}
/**
- * e1000_open - Called when a network interface is made active
+ * e1000e_open - Called when a network interface is made active
* @netdev: network interface device structure
*
* Returns 0 on success, negative value on failure
@@ -4506,7 +4543,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int e1000_open(struct net_device *netdev)
+int e1000e_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4604,7 +4641,7 @@ err_setup_tx:
}
/**
- * e1000_close - Disables a network interface
+ * e1000e_close - Disables a network interface
* @netdev: network interface device structure
*
* Returns 0, this is not allowed to fail
@@ -4614,7 +4651,7 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int e1000_close(struct net_device *netdev)
+int e1000e_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
@@ -6861,7 +6898,7 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
le16_to_cpus(&buf);
- if (!ret_val && (!(buf & (1 << 0)))) {
+ if (!ret_val && (!(buf & BIT(0)))) {
/* Deep Smart Power Down (DSPD) */
dev_warn(&adapter->pdev->dev,
"Warning: detected DSPD enabled in EEPROM\n");
@@ -6878,6 +6915,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_RXFCS;
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable, make sure the Tx flag is always in the same state as Rx.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
return features;
}
@@ -6920,8 +6965,8 @@ static int e1000_set_features(struct net_device *netdev,
}
static const struct net_device_ops e1000e_netdev_ops = {
- .ndo_open = e1000_open,
- .ndo_stop = e1000_close,
+ .ndo_open = e1000e_open,
+ .ndo_stop = e1000e_close,
.ndo_start_xmit = e1000_xmit_frame,
.ndo_get_stats64 = e1000e_get_stats64,
.ndo_set_rx_mode = e1000e_set_rx_mode,
@@ -6965,7 +7010,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
- s32 rval = 0;
+ s32 ret_val = 0;
if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -7200,18 +7245,18 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
(adapter->hw.bus.func == 1))
- rval = e1000_read_nvm(&adapter->hw,
+ ret_val = e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_B,
1, &eeprom_data);
else
- rval = e1000_read_nvm(&adapter->hw,
+ ret_val = e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_A,
1, &eeprom_data);
}
/* fetch WoL from EEPROM */
- if (rval)
- e_dbg("NVM read error getting WoL initial values: %d\n", rval);
+ if (ret_val)
+ e_dbg("NVM read error getting WoL initial values: %d\n", ret_val);
else if (eeprom_data & eeprom_apme_mask)
adapter->eeprom_wol |= E1000_WUFC_MAG;
@@ -7231,13 +7276,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
device_wakeup_enable(&pdev->dev);
/* save off EEPROM version number */
- rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+ ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
- if (rval) {
- e_dbg("NVM read error getting EEPROM version: %d\n", rval);
+ if (ret_val) {
+ e_dbg("NVM read error getting EEPROM version: %d\n", ret_val);
adapter->eeprom_vers = 0;
}
+ /* init PTP hardware clock */
+ e1000e_ptp_init(adapter);
+
/* reset the hardware with the new settings */
e1000e_reset(adapter);
@@ -7256,9 +7304,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- /* init PTP hardware clock */
- e1000e_ptp_init(adapter);
-
e1000_print_device_info(adapter);
if (pci_dev_run_wake(pdev))
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 49f205c02..2efd80dfd 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -67,7 +67,7 @@ static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
u32 eecd = er32(EECD);
u32 mask;
- mask = 0x01 << (count - 1);
+ mask = BIT(count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
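In e1000_shift_out_eec_bits(), seeding the mask with BIT(count - 1) clocks the data out MSB-first, walking the mask down to bit 0. A small sketch:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))

static void shift_out_bits(uint16_t data, uint16_t count)
{
	uint32_t mask = BIT(count - 1);	/* start at the top data bit */

	do {
		putchar(data & mask ? '1' : '0');   /* drive DI high/low */
		/* ...raise and lower the EEPROM clock here... */
		mask >>= 1;
	} while (mask);
	putchar('\n');
}

int main(void)
{
	shift_out_bits(0xA5, 8);	/* prints 10100101 */
	return 0;
}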
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index de13aeaca..d78d47b41 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2894,11 +2894,11 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if ((hw->phy.type == e1000_phy_82578) &&
(hw->phy.revision >= 1) &&
(hw->phy.addr == 2) &&
- !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
+ !(MAX_PHY_REG_ADDRESS & reg) && (data & BIT(11))) {
u16 data2 = 0x7EFF;
ret_val = e1000_access_phy_debug_regs_hv(hw,
- (1 << 6) | 0x3,
+ BIT(6) | 0x3,
&data2, false);
if (ret_val)
goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 55bfe4735..3027f63ee 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -104,9 +104,9 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define BM_WUC_DATA_OPCODE 0x12
#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
#define BM_WUC_ENABLE_REG 17
-#define BM_WUC_ENABLE_BIT (1 << 2)
-#define BM_WUC_HOST_WU_BIT (1 << 4)
-#define BM_WUC_ME_WU_BIT (1 << 5)
+#define BM_WUC_ENABLE_BIT BIT(2)
+#define BM_WUC_HOST_WU_BIT BIT(4)
+#define BM_WUC_ME_WU_BIT BIT(5)
#define PHY_UPPER_SHIFT 21
#define BM_PHY_REG(page, reg) \
@@ -124,8 +124,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define I82578_ADDR_REG 29
#define I82577_ADDR_REG 16
#define I82577_CFG_REG 22
-#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CFG_ASSERT_CRS_ON_TX BIT(15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift */
#define I82577_CTRL_REG 23
/* 82577 specific PHY registers */
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index e2ff3ef75..2e1b17ad5 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -79,6 +79,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
ew32(TIMINCA, timinca);
+ adapter->ptp_delta = delta;
+
spin_unlock_irqrestore(&adapter->systim_lock, flags);
return 0;
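This one-line change caches the last frequency adjustment so e1000e_systim_reset() (netdev.c hunk above) can replay it after a MAC reset. A hedged skeleton of an adjfreq-style callback that scales a base increment by parts-per-billion and remembers the delta; the field names and math are illustrative, not the exact e1000e register layout:

#include <stdint.h>
#include <stdio.h>

struct clk {
	uint64_t base_incvalue;	/* nominal per-tick increment */
	int32_t ptp_delta;	/* last applied adjustment, in ppb */
};

static int phc_adjfreq(struct clk *c, int32_t delta_ppb)
{
	int64_t incvalue = (int64_t)c->base_incvalue;

	/* scale the nominal increment by delta parts-per-billion */
	incvalue += incvalue * delta_ppb / 1000000000LL;
	/* ew32(TIMINCA, incvalue) would go here in the driver */
	c->ptp_delta = delta_ppb;	/* replayed by the systim reset */
	printf("incvalue=%lld\n", (long long)incvalue);
	return 0;
}

int main(void)
{
	struct clk c = { .base_incvalue = 0x800000, .ptp_delta = 0 };

	return phc_adjfreq(&c, 5000);	/* +5 ppm */
}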
diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile
index b006ff66d..cac645329 100644
--- a/drivers/net/ethernet/intel/fm10k/Makefile
+++ b/drivers/net/ethernet/intel/fm10k/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
-# Intel Ethernet Switch Host Interface Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
+# Intel(R) Ethernet Switch Host Interface Driver
+# Copyright(c) 2013 - 2016 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -22,7 +22,7 @@
################################################################################
#
-# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver
+# Makefile for the Intel(R) Ethernet Switch Host Interface Driver
#
obj-$(CONFIG_FM10K) += fm10k.o
@@ -30,7 +30,6 @@ obj-$(CONFIG_FM10K) += fm10k.o
fm10k-y := fm10k_main.o \
fm10k_common.o \
fm10k_pci.o \
- fm10k_ptp.o \
fm10k_netdev.o \
fm10k_ethtool.o \
fm10k_pf.o \
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index b34bb008b..fcf106e54 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,9 +27,6 @@
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
-#include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/ptp_clock_kernel.h>
#include "fm10k_pf.h"
#include "fm10k_vf.h"
@@ -262,12 +259,12 @@ struct fm10k_intfc {
unsigned long state;
u32 flags;
-#define FM10K_FLAG_RESET_REQUESTED (u32)(1 << 0)
-#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(1 << 1)
-#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2)
-#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3)
-#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4)
-#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5)
+#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0))
+#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1))
+#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2))
+#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3))
+#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4))
+#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5))
int xcast_mode;
/* Tx fast path data */
@@ -333,6 +330,7 @@ struct fm10k_intfc {
unsigned long last_reset;
unsigned long link_down_event;
bool host_ready;
+ bool lport_map_failed;
u32 reta[FM10K_RETA_SIZE];
u32 rssrk[FM10K_RSSRK_SIZE];
@@ -342,22 +340,8 @@ struct fm10k_intfc {
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_intfc;
-
#endif /* CONFIG_DEBUG_FS */
- struct ptp_clock_info ptp_caps;
- struct ptp_clock *ptp_clock;
-
- struct sk_buff_head ts_tx_skb_queue;
- u32 tx_hwtstamp_timeouts;
- struct hwtstamp_config ts_config;
- /* We are unable to actually adjust the clock beyond the frequency
- * value. Once the clock is started there is no resetting it. As
- * such we maintain a separate offset from the actual hardware clock
- * to allow for offset adjustment.
- */
- s64 ptp_adjust;
- rwlock_t systime_lock;
#ifdef CONFIG_DCB
u8 pfc_en;
#endif
@@ -510,6 +494,8 @@ int fm10k_close(struct net_device *netdev);
/* Ethtool */
void fm10k_set_ethtool_ops(struct net_device *dev);
+u32 fm10k_get_reta_size(struct net_device *netdev);
+void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir);
/* IOV */
s32 fm10k_iov_event(struct fm10k_intfc *interface);
@@ -544,21 +530,6 @@ static inline void fm10k_dbg_init(void) {}
static inline void fm10k_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
-/* Time Stamping */
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
- struct skb_shared_hwtstamps *hwtstamp,
- u64 systime);
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb);
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
- u64 systime);
-void fm10k_ts_reset(struct fm10k_intfc *interface);
-void fm10k_ts_init(struct fm10k_intfc *interface);
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface);
-void fm10k_ptp_register(struct fm10k_intfc *interface);
-void fm10k_ptp_unregister(struct fm10k_intfc *interface);
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-
/* DCB */
#ifdef CONFIG_DCB
void fm10k_dcbnl_set_ops(struct net_device *dev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index 6cfae6ac0..5bbf19cfe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 45e4e5b1f..50f71e997 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index 2be436183..db4bd8bf9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 5d6137faf..5116fd043 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 2f6a05b57..9c0d87503 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -77,19 +77,6 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
FM10K_STAT("tx_hang_count", tx_timeout_count),
-
- FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
-};
-
-static const struct fm10k_stats fm10k_gstrings_debug_stats[] = {
- FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full),
- FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good),
- FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good),
- FM10K_STAT("rx_switch_errors", rx_switch_errors),
- FM10K_STAT("rx_drops", rx_drops),
- FM10K_STAT("rx_pp_errors", rx_pp_errors),
- FM10K_STAT("rx_link_errors", rx_link_errors),
- FM10K_STAT("rx_length_errors", rx_length_errors),
};
static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
@@ -121,13 +108,21 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
};
+#define FM10K_QUEUE_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \
+ .stat_offset = offsetof(struct fm10k_ring, _stat) \
+}
+
+static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
+ FM10K_QUEUE_STAT("packets", stats.packets),
+ FM10K_QUEUE_STAT("bytes", stats.bytes),
+};
+
#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
-#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats)
#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
-
-#define FM10K_QUEUE_STATS_LEN(_n) \
- ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
+#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)
#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
FM10K_NETDEV_STATS_LEN + \
@@ -145,77 +140,56 @@ enum fm10k_self_test_types {
};
enum {
- FM10K_PRV_FLAG_DEBUG_STATS,
FM10K_PRV_FLAG_LEN,
};
static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
- "debug-statistics",
};
+static void fm10k_add_stat_strings(char **p, const char *prefix,
+ const struct fm10k_stats stats[],
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ snprintf(*p, ETH_GSTRING_LEN, "%s%s",
+ prefix, stats[i].stat_string);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
char *p = (char *)data;
unsigned int i;
- unsigned int j;
- for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats,
+ FM10K_NETDEV_STATS_LEN);
- for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats,
+ FM10K_GLOBAL_STATS_LEN);
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_debug_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats,
+ FM10K_MBX_STATS_LEN);
- for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ if (interface->hw.mac.type != fm10k_mac_vf)
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats,
+ FM10K_PF_STATS_LEN);
- if (interface->hw.mac.type != fm10k_mac_vf) {
- for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < interface->hw.mac.max_queues; i++) {
+ char prefix[ETH_GSTRING_LEN];
- if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
- for (i = 0; i < iov_data->num_vfs; i++) {
- for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
- snprintf(p,
- ETH_GSTRING_LEN,
- "vf_%u_%s", i,
- fm10k_gstrings_mbx_stats[j].stat_string);
- p += ETH_GSTRING_LEN;
- }
- }
- }
+ snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
+ fm10k_add_stat_strings(&p, prefix,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
- for (i = 0; i < interface->hw.mac.max_queues; i++) {
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
+ fm10k_add_stat_strings(&p, prefix,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
}
}
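The string-table refactor above replaces four near-identical memcpy loops with one helper. The key detail is that ethtool string tables are fixed-width slots of ETH_GSTRING_LEN bytes, so the cursor advances by the slot size rather than by strlen(). A standalone sketch:

#include <stdio.h>

#define ETH_GSTRING_LEN 32

static void add_stat_strings(char **p, const char *prefix,
			     const char *const names[], unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		snprintf(*p, ETH_GSTRING_LEN, "%s%s", prefix, names[i]);
		*p += ETH_GSTRING_LEN;	/* fixed-width slot, not strlen */
	}
}

int main(void)
{
	static const char *const qstats[] = { "packets", "bytes" };
	char buf[4 * ETH_GSTRING_LEN] = "";
	char *p = buf;

	add_stat_strings(&p, "tx_queue_0_", qstats, 2);
	add_stat_strings(&p, "rx_queue_0_", qstats, 2);

	for (int i = 0; i < 4; i++)
		puts(buf + i * ETH_GSTRING_LEN);
	return 0;
}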
@@ -242,7 +216,6 @@ static void fm10k_get_strings(struct net_device *dev,
static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
int stats_len = FM10K_STATIC_STATS_LEN;
@@ -250,19 +223,11 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_TEST:
return FM10K_TEST_LEN;
case ETH_SS_STATS:
- stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues);
+ stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;
if (hw->mac.type != fm10k_mac_vf)
stats_len += FM10K_PF_STATS_LEN;
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- stats_len += FM10K_DEBUG_STATS_LEN;
-
- if (iov_data)
- stats_len += FM10K_MBX_STATS_LEN *
- iov_data->num_vfs;
- }
-
return stats_len;
case ETH_SS_PRIV_FLAGS:
return FM10K_PRV_FLAG_LEN;
@@ -271,93 +236,80 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
}
}
-static void fm10k_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats __always_unused *stats,
- u64 *data)
+static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
+ const struct fm10k_stats stats[],
+ const unsigned int size)
{
- const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
- struct net_device_stats *net_stats = &netdev->stats;
+ unsigned int i;
char *p;
- int i, j;
- fm10k_update_stats(interface);
-
- for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
- p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ if (!pointer) {
+ /* memory is not zero-allocated, so we have to clear it */
+ for (i = 0; i < size; i++)
+ *((*data)++) = 0;
+ return;
}
- for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_global_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+ for (i = 0; i < size; i++) {
+ p = (char *)pointer + stats[i].stat_offset;
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_debug_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ switch (stats[i].sizeof_stat) {
+ case sizeof(u64):
+ *((*data)++) = *(u64 *)p;
+ break;
+ case sizeof(u32):
+ *((*data)++) = *(u32 *)p;
+ break;
+ case sizeof(u16):
+ *((*data)++) = *(u16 *)p;
+ break;
+ case sizeof(u8):
+ *((*data)++) = *(u8 *)p;
+ break;
+ default:
+ *((*data)++) = 0;
}
}
+}
- for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
- p = (char *)&interface->hw.mbx +
- fm10k_gstrings_mbx_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+static void fm10k_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct net_device_stats *net_stats = &netdev->stats;
+ int i;
- if (interface->hw.mac.type != fm10k_mac_vf) {
- for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_pf_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- }
+ fm10k_update_stats(interface);
- if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
- for (i = 0; i < iov_data->num_vfs; i++) {
- struct fm10k_vf_info *vf_info;
+ fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats,
+ FM10K_NETDEV_STATS_LEN);
- vf_info = &iov_data->vf_info[i];
+ fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats,
+ FM10K_GLOBAL_STATS_LEN);
- /* skip stats if we don't have a vf info */
- if (!vf_info) {
- data += FM10K_MBX_STATS_LEN;
- continue;
- }
+ fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
+ fm10k_gstrings_mbx_stats,
+ FM10K_MBX_STATS_LEN);
- for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
- p = (char *)&vf_info->mbx +
- fm10k_gstrings_mbx_stats[j].stat_offset;
- *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- }
+ if (interface->hw.mac.type != fm10k_mac_vf) {
+ fm10k_add_ethtool_stats(&data, interface,
+ fm10k_gstrings_pf_stats,
+ FM10K_PF_STATS_LEN);
}
for (i = 0; i < interface->hw.mac.max_queues; i++) {
struct fm10k_ring *ring;
- u64 *queue_stat;
ring = interface->tx_ring[i];
- if (ring)
- queue_stat = (u64 *)&ring->stats;
- for (j = 0; j < stat_count; j++)
- *(data++) = ring ? queue_stat[j] : 0;
+ fm10k_add_ethtool_stats(&data, ring,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
ring = interface->rx_ring[i];
- if (ring)
- queue_stat = (u64 *)&ring->stats;
- for (j = 0; j < stat_count; j++)
- *(data++) = ring ? queue_stat[j] : 0;
+ fm10k_add_ethtool_stats(&data, ring,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
}
}
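fm10k_add_ethtool_stats() records each counter's offset and sizeof within some base struct, then widens whatever width it finds (u8/u16/u32/u64) to u64; a NULL base pointer (e.g. an absent ring) emits zeros so the output layout stays fixed. A condensed sketch of that dispatch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stat_def { size_t size; size_t offset; };

static void add_stats(uint64_t **data, const void *base,
		      const struct stat_def *defs, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		uint64_t v = 0;

		if (base) {	/* NULL base: keep the zero placeholder */
			const char *p = (const char *)base + defs[i].offset;

			switch (defs[i].size) {
			case sizeof(uint64_t): v = *(const uint64_t *)p; break;
			case sizeof(uint32_t): v = *(const uint32_t *)p; break;
			case sizeof(uint16_t): v = *(const uint16_t *)p; break;
			case sizeof(uint8_t):  v = *(const uint8_t *)p;  break;
			}
		}
		*(*data)++ = v;
	}
}

struct ring_stats { uint64_t packets; uint32_t restarts; };

int main(void)
{
	static const struct stat_def defs[] = {
		{ sizeof(uint64_t), offsetof(struct ring_stats, packets) },
		{ sizeof(uint32_t), offsetof(struct ring_stats, restarts) },
	};
	struct ring_stats rs = { 1234, 5 };
	uint64_t out[2], *cur = out;

	add_stats(&cur, &rs, defs, 2);
	printf("%llu %llu\n", (unsigned long long)out[0],
	       (unsigned long long)out[1]);
	return 0;
}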
@@ -425,7 +377,7 @@ static void fm10k_get_regs(struct net_device *netdev,
u32 *buff = p;
u16 i;
- regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;
switch (hw->mac.type) {
case fm10k_mac_pf:
@@ -935,15 +887,15 @@ static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 attr_flag, test_msg[6];
unsigned long timeout;
- int err;
+ int err = -EINVAL;
/* For now this is a VF only feature */
if (hw->mac.type != fm10k_mac_vf)
return 0;
/* loop through both nested and unnested attribute types */
- for (attr_flag = (1 << FM10K_TEST_MSG_UNSET);
- attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED));
+ for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
+ attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
attr_flag += attr_flag) {
/* generate message to be tested */
fm10k_tlv_msg_test_create(test_msg, attr_flag);
@@ -1001,35 +953,56 @@ static void fm10k_self_test(struct net_device *dev,
static u32 fm10k_get_priv_flags(struct net_device *netdev)
{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- u32 priv_flags = 0;
-
- if (interface->flags & FM10K_FLAG_DEBUG_STATS)
- priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS;
-
- return priv_flags;
+ return 0;
}
static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
- struct fm10k_intfc *interface = netdev_priv(netdev);
-
- if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN))
+ if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
return -EINVAL;
- if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS))
- interface->flags |= FM10K_FLAG_DEBUG_STATS;
- else
- interface->flags &= ~FM10K_FLAG_DEBUG_STATS;
-
return 0;
}
-static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
+u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}
+void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
+{
+ u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
+ struct fm10k_hw *hw = &interface->hw;
+ u32 table[4];
+ int i, j;
+
+ /* record entries to reta table */
+ for (i = 0; i < FM10K_RETA_SIZE; i++) {
+ u32 reta, n;
+
+ /* generate a new table if we weren't given one */
+ for (j = 0; j < 4; j++) {
+ if (indir)
+ n = indir[i + j];
+ else
+ n = ethtool_rxfh_indir_default(i + j, rss_i);
+
+ table[j] = n;
+ }
+
+ reta = table[0] |
+ (table[1] << 8) |
+ (table[2] << 16) |
+ (table[3] << 24);
+
+ if (interface->reta[i] == reta)
+ continue;
+
+ interface->reta[i] = reta;
+ fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
+ }
+}
+
static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
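The new fm10k_write_reta() packs four 8-bit queue indices into each 32-bit RETA register, falling back to ethtool's default round-robin spread when no indirection table is supplied. A sketch of the packing (indir_default() mirrors ethtool_rxfh_indir_default()):

#include <stdint.h>
#include <stdio.h>

/* same spread as the kernel's ethtool_rxfh_indir_default() */
static uint32_t indir_default(uint32_t index, uint32_t n_rx_rings)
{
	return index % n_rx_rings;
}

static uint32_t pack_reta_entry(const uint32_t *indir, unsigned int i,
				uint32_t rss_i)
{
	uint32_t table[4];

	/* generate a default table if the caller didn't supply one */
	for (unsigned int j = 0; j < 4; j++)
		table[j] = indir ? indir[i + j] : indir_default(i + j, rss_i);

	return table[0] | (table[1] << 8) | (table[2] << 16) | (table[3] << 24);
}

int main(void)
{
	/* no user table: default spread over 4 queues -> 0x03020100 */
	printf("RETA[0] = 0x%08x\n", pack_reta_entry(NULL, 0, 4));
	return 0;
}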
@@ -1053,7 +1026,6 @@ static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
- struct fm10k_hw *hw = &interface->hw;
int i;
u16 rss_i;
@@ -1068,19 +1040,7 @@ static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
return -EINVAL;
}
- /* record entries to reta table */
- for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
- u32 reta = indir[0] |
- (indir[1] << 8) |
- (indir[2] << 16) |
- (indir[3] << 24);
-
- if (interface->reta[i] == reta)
- continue;
-
- interface->reta[i] = reta;
- fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
- }
+ fm10k_write_reta(interface, indir);
return 0;
}
@@ -1145,7 +1105,7 @@ static unsigned int fm10k_max_channels(struct net_device *dev)
/* For QoS report channels per traffic class */
if (tcs > 1)
- max_combined = 1 << (fls(max_combined / tcs) - 1);
+ max_combined = BIT((fls(max_combined / tcs) - 1));
return max_combined;
}
@@ -1192,33 +1152,6 @@ static int fm10k_set_channels(struct net_device *dev,
return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
}
-static int fm10k_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- struct fm10k_intfc *interface = netdev_priv(dev);
-
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (interface->ptp_clock)
- info->phc_index = ptp_clock_index(interface->ptp_clock);
- else
- info->phc_index = -1;
-
- info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
-
- return 0;
-}
-
static const struct ethtool_ops fm10k_ethtool_ops = {
.get_strings = fm10k_get_strings,
.get_sset_count = fm10k_get_sset_count,
@@ -1246,7 +1179,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_rxfh = fm10k_set_rssh,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
- .get_ts_info = fm10k_get_ts_info,
};
void fm10k_set_ethtool_ops(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index acfb8b1f8..47f0743ec 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -50,7 +50,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
s64 vflre;
int i;
- /* if there is no iov_data then there is no mailboxes to process */
+ /* if there is no iov_data then there is no mailbox to process */
if (!ACCESS_ONCE(interface->iov_data))
return 0;
@@ -98,7 +98,7 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
struct fm10k_iov_data *iov_data;
int i;
- /* if there is no iov_data then there is no mailboxes to process */
+ /* if there is no iov_data then there is no mailbox to process */
if (!ACCESS_ONCE(interface->iov_data))
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 4de17db38..0e166e9c9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,15 +29,15 @@
#include "fm10k.h"
#define DRV_VERSION "0.19.3-k"
+#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
-static const char fm10k_driver_string[] =
- "Intel(R) Ethernet Switch Host Interface Driver";
+static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright (c) 2013 Intel Corporation.";
+ "Copyright (c) 2013 - 2016 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
+MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
@@ -401,10 +401,10 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
}
#define FM10K_RSS_L4_TYPES_MASK \
- ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
- (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
- (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
- (1ul << FM10K_RSSTYPE_IPV6_UDP))
+ (BIT(FM10K_RSSTYPE_IPV4_TCP) | \
+ BIT(FM10K_RSSTYPE_IPV4_UDP) | \
+ BIT(FM10K_RSSTYPE_IPV6_TCP) | \
+ BIT(FM10K_RSSTYPE_IPV6_UDP))
static inline void fm10k_rx_hash(struct fm10k_ring *ring,
union fm10k_rx_desc *rx_desc,
@@ -420,23 +420,10 @@ static inline void fm10k_rx_hash(struct fm10k_ring *ring,
return;
skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
- (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+ (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
-static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
- union fm10k_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct fm10k_intfc *interface = rx_ring->q_vector->interface;
-
- FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
-
- if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
- fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
- le64_to_cpu(rx_desc->q.timestamp));
-}
-
static void fm10k_type_trans(struct fm10k_ring *rx_ring,
union fm10k_rx_desc __maybe_unused *rx_desc,
struct sk_buff *skb)
@@ -486,8 +473,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
fm10k_rx_checksum(rx_ring, rx_desc, skb);
- fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);
-
FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -835,6 +820,8 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
struct ipv6hdr *ipv6;
u8 *raw;
} network_hdr;
+ u8 *transport_hdr;
+ __be16 frag_off;
__be16 protocol;
u8 l4_hdr = 0;
@@ -852,9 +839,11 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
goto no_csum;
}
network_hdr.raw = skb_inner_network_header(skb);
+ transport_hdr = skb_inner_transport_header(skb);
} else {
protocol = vlan_get_protocol(skb);
network_hdr.raw = skb_network_header(skb);
+ transport_hdr = skb_transport_header(skb);
}
switch (protocol) {
@@ -863,15 +852,17 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
break;
case htons(ETH_P_IPV6):
l4_hdr = network_hdr.ipv6->nexthdr;
+ if (likely((transport_hdr - network_hdr.raw) ==
+ sizeof(struct ipv6hdr)))
+ break;
+ ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
+ sizeof(struct ipv6hdr),
+ &l4_hdr, &frag_off);
+ if (unlikely(frag_off))
+ l4_hdr = NEXTHDR_FRAGMENT;
break;
default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum but ip version=%x!\n",
- protocol);
- }
- tx_ring->tx_stats.csum_err++;
- goto no_csum;
+ break;
}
switch (l4_hdr) {
@@ -884,9 +875,10 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ "partial checksum, version=%d l4 proto=%x\n",
+ protocol, l4_hdr);
}
+ skb_checksum_help(skb);
tx_ring->tx_stats.csum_err++;
goto no_csum;
}
@@ -912,11 +904,6 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
/* set type for advanced descriptor with frame checksum insertion */
u32 desc_flags = 0;
- /* set timestamping bits */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
- desc_flags |= FM10K_TXD_FLAG_TIME;
-
/* set checksum offload bits */
desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
FM10K_TXD_FLAG_CSUM);
@@ -1198,9 +1185,10 @@ void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
* fm10k_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
- struct fm10k_ring *tx_ring)
+ struct fm10k_ring *tx_ring, int napi_budget)
{
struct fm10k_intfc *interface = q_vector->interface;
struct fm10k_tx_buffer *tx_buffer;
@@ -1238,7 +1226,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1409,7 +1397,7 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
* accounts for changes in the ITR due to PCIe link speed.
*/
itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8;
- avg_wire_size += (1 << itr_round) - 1;
+ avg_wire_size += BIT(itr_round) - 1;
avg_wire_size >>= itr_round;
/* write back value and retain adaptive flag */
@@ -1449,8 +1437,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
int per_ring_budget, work_done = 0;
bool clean_complete = true;
- fm10k_for_each_ring(ring, q_vector->tx)
- clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
+ fm10k_for_each_ring(ring, q_vector->tx) {
+ if (!fm10k_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
/* Handle case where we are called by netpoll with a budget of 0 */
if (budget <= 0)
@@ -1468,7 +1458,8 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
work_done += work;
- clean_complete &= !!(work < per_ring_budget);
+ if (work >= per_ring_budget)
+ clean_complete = false;
}
/* If all work not completed, return budget and keep polling */
@@ -1511,17 +1502,17 @@ static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
/* set QoS mask and indices */
f = &interface->ring_feature[RING_F_QOS];
f->indices = pcs;
- f->mask = (1 << fls(pcs - 1)) - 1;
+ f->mask = BIT(fls(pcs - 1)) - 1;
/* determine the upper limit for our current DCB mode */
rss_i = interface->hw.mac.max_queues / pcs;
- rss_i = 1 << (fls(rss_i) - 1);
+ rss_i = BIT(fls(rss_i) - 1);
/* set RSS mask and indices */
f = &interface->ring_feature[RING_F_RSS];
rss_i = min_t(u16, rss_i, f->limit);
f->indices = rss_i;
- f->mask = (1 << fls(rss_i - 1)) - 1;
+ f->mask = BIT(fls(rss_i - 1)) - 1;
/* configure pause class to queue mapping */
for (i = 0; i < pcs; i++)
@@ -1551,7 +1542,7 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
/* record indices and power of 2 mask for RSS */
f->indices = rss_i;
- f->mask = (1 << fls(rss_i - 1)) - 1;
+ f->mask = BIT(fls(rss_i - 1)) - 1;
interface->num_rx_queues = rss_i;
interface->num_tx_queues = rss_i;
@@ -1572,17 +1563,29 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
**/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
- /* Start with base case */
- interface->num_rx_queues = 1;
- interface->num_tx_queues = 1;
-
+ /* Attempt to setup QoS and RSS first */
if (fm10k_set_qos_queues(interface))
return;
+ /* If we don't have QoS, just fall back to RSS only. */
fm10k_set_rss_queues(interface);
}
/**
+ * fm10k_reset_num_queues - Reset the number of queues to zero
+ * @interface: board private structure
+ *
+ * This function should be called whenever we need to reset the number of
+ * queues after an error condition.
+ */
+static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
+{
+ interface->num_tx_queues = 0;
+ interface->num_rx_queues = 0;
+ interface->num_q_vectors = 0;
+}
+
+/**
* fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
* @interface: board private structure to initialize
* @v_count: q_vectors allocated on interface, used for ring interleaving
@@ -1765,9 +1768,7 @@ static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
return 0;
err_out:
- interface->num_tx_queues = 0;
- interface->num_rx_queues = 0;
- interface->num_q_vectors = 0;
+ fm10k_reset_num_queues(interface);
while (v_idx--)
fm10k_free_q_vector(interface, v_idx);
@@ -1787,9 +1788,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
{
int v_idx = interface->num_q_vectors;
- interface->num_tx_queues = 0;
- interface->num_rx_queues = 0;
- interface->num_q_vectors = 0;
+ fm10k_reset_num_queues(interface);
while (v_idx--)
fm10k_free_q_vector(interface, v_idx);
@@ -1935,7 +1934,7 @@ static void fm10k_assign_rings(struct fm10k_intfc *interface)
static void fm10k_init_reta(struct fm10k_intfc *interface)
{
u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
- u32 reta, base;
+ u32 reta;
/* If the Rx flow indirection table has been configured manually, we
* need to maintain it when possible.
@@ -1960,21 +1959,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
}
repopulate_reta:
- /* Populate the redirection table 4 entries at a time. To do this
- * we are generating the results for n and n+2 and then interleaving
- * those with the results with n+1 and n+3.
- */
- for (i = FM10K_RETA_SIZE; i--;) {
- /* first pass generates n and n+2 */
- base = ((i * 0x00040004) + 0x00020000) * rss_i;
- reta = (base & 0x3F803F80) >> 7;
-
- /* second pass generates n+1 and n+3 */
- base += 0x00010001 * rss_i;
- reta |= (base & 0x3F803F80) << 1;
-
- interface->reta[i] = reta;
- }
+ fm10k_write_reta(interface, NULL);
}
/**
@@ -1997,14 +1982,15 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
if (err) {
dev_err(&interface->pdev->dev,
"Unable to initialize MSI-X capability\n");
- return err;
+ goto err_init_msix;
}
/* Allocate memory for queues */
err = fm10k_alloc_q_vectors(interface);
if (err) {
- fm10k_reset_msix_capability(interface);
- return err;
+ dev_err(&interface->pdev->dev,
+ "Unable to allocate queue vectors\n");
+ goto err_alloc_q_vectors;
}
/* Map rings to devices, and map devices to physical queues */
@@ -2014,6 +2000,12 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
fm10k_init_reta(interface);
return 0;
+
+err_alloc_q_vectors:
+ fm10k_reset_msix_capability(interface);
+err_init_msix:
+ fm10k_reset_num_queues(interface);
+ return err;
}
/**
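
The most substantive change in this file is the IPv6 branch of fm10k_tx_csum() above: when the transport header is not at the fixed 40-byte offset, ipv6_skip_exthdr() walks the nexthdr chain to recover the real L4 protocol, and a non-zero fragment offset rules out checksum offload. The same walk in isolation, as a sketch (the helper name is illustrative):

	#include <net/ipv6.h>

	/* Resolve the L4 protocol of an IPv6 packet whose extension
	 * headers start right after the fixed header; a non-zero
	 * frag_off means the L4 header sits in a later fragment.
	 */
	static u8 ipv6_l4_proto(const struct sk_buff *skb,
				const unsigned char *nh_raw)
	{
		u8 l4_hdr = ((const struct ipv6hdr *)nh_raw)->nexthdr;
		__be16 frag_off;

		ipv6_skip_exthdr(skb, nh_raw - skb->data +
				 sizeof(struct ipv6hdr),
				 &l4_hdr, &frag_off);
		if (frag_off)
			l4_hdr = NEXTHDR_FRAGMENT;
		return l4_hdr;
	}
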
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 98202c3d5..c9dfa6564 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index 245a0a3dc..b7dbc8a84 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index d09a8dd71..2a08d3f5b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -243,9 +243,6 @@ void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
for (i = 0; i < interface->num_tx_queues; i++)
fm10k_clean_tx_ring(interface->tx_ring[i]);
-
- /* remove any stale timestamp buffers and free them */
- skb_queue_purge(&interface->ts_tx_skb_queue);
}
/**
@@ -440,7 +437,7 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
* @sa_family: Address family of new port
* @port: port number used for VXLAN
*
- * This funciton is called when a new VXLAN interface has added a new port
+ * This function is called when a new VXLAN interface has added a new port
* number to the range that is currently in use for VXLAN. The new port
* number is always added to the tail so that the port number list should
* match the order in which the ports were allocated. The head of the list
@@ -484,7 +481,7 @@ insert_tail:
* @sa_family: Address family of freed port
* @port: port number used for VXLAN
*
- * This funciton is called when a new VXLAN interface has freed a port
+ * This function is called when a new VXLAN interface has freed a port
* number from the range that is currently in use for VXLAN. The freed
* port is removed from the list and the new head is used to determine
* the port number for offloads.
@@ -660,10 +657,6 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
__skb_put(skb, pad_len);
}
- /* prepare packet for hardware time stamping */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- fm10k_ts_tx_enqueue(interface, skb);
-
if (r_idx >= interface->num_tx_queues)
r_idx %= interface->num_tx_queues;
@@ -884,7 +877,7 @@ static int __fm10k_uc_sync(struct net_device *dev,
return -EADDRNOTAVAIL;
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = hw->mac.ops.update_uc_addr(hw, glort, addr,
@@ -947,7 +940,7 @@ static int __fm10k_mc_sync(struct net_device *dev,
u16 vid, glort = interface->glort;
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
@@ -1002,11 +995,8 @@ static void fm10k_set_rx_mode(struct net_device *dev)
}
/* synchronize all of the addresses */
- if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
- __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
- if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
- __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
- }
+ __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
+ __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
fm10k_mbx_unlock(interface);
}
@@ -1044,7 +1034,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
hw->mac.ops.update_vlan(hw, 0, 0, true);
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
hw->mac.ops.update_vlan(hw, vid, 0, true);
@@ -1056,11 +1046,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
/* synchronize all of the addresses */
- if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
- __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
- if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
- __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
- }
+ __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
+ __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
fm10k_mbx_unlock(interface);
@@ -1213,18 +1200,6 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return fm10k_setup_tc(dev, tc->tc);
}
-static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return fm10k_get_ts_config(netdev, ifr);
- case SIOCSHWTSTAMP:
- return fm10k_set_ts_config(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
struct fm10k_l2_accel *l2_accel)
{
@@ -1402,7 +1377,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
.ndo_add_vxlan_port = fm10k_add_vxlan_port,
.ndo_del_vxlan_port = fm10k_del_vxlan_port,
- .ndo_do_ioctl = fm10k_ioctl,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1429,7 +1403,7 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
/* configure default debug level */
interface = netdev_priv(dev);
- interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
/* configure default features */
dev->features |= NETIF_F_IP_CSUM |
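
Note the sync hunks above: __dev_uc_sync()/__dev_mc_sync() are now called unconditionally, so the hardware address tables track the kernel's unicast and multicast lists in every xcast mode instead of only outside promiscuous/allmulti. The callbacks those helpers expect have the shape below (illustrative names; the fm10k versions forward each address to the switch over the mailbox):

	#include <linux/netdevice.h>

	/* Invoked by __dev_uc_sync() for each newly added address. */
	static int example_uc_sync(struct net_device *dev,
				   const unsigned char *addr)
	{
		/* program 'addr' into the hardware filter; 0 or -errno */
		return 0;
	}

	/* Invoked for each address that has been removed. */
	static int example_uc_unsync(struct net_device *dev,
				     const unsigned char *addr)
	{
		/* remove 'addr' from the hardware filter */
		return 0;
	}
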
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 4eb7a6fa6..e05aca9be 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -99,7 +99,7 @@ void fm10k_service_event_schedule(struct fm10k_intfc *interface)
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
- BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
+ WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
@@ -145,7 +145,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
usleep_range(1000, 2000);
@@ -209,9 +209,6 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
- /* reset clock */
- fm10k_ts_reset(interface);
-
err = netif_running(netdev) ? fm10k_open(netdev) : 0;
if (err)
goto err_open;
@@ -559,7 +556,6 @@ static void fm10k_service_task(struct work_struct *work)
/* tasks only run when interface is up */
fm10k_watchdog_subtask(interface);
fm10k_check_hang_subtask(interface);
- fm10k_ts_tx_subtask(interface);
/* release lock on service events to allow scheduling next event */
fm10k_service_event_complete(interface);
@@ -579,7 +575,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
u64 tdba = ring->dma;
u32 size = ring->count * sizeof(struct fm10k_tx_desc);
u32 txint = FM10K_INT_MAP_DISABLE;
- u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT);
+ u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
@@ -730,7 +726,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
if (interface->pfc_en)
rx_pause = interface->pfc_en;
#endif
- if (!(rx_pause & (1 << ring->qos_pc)))
+ if (!(rx_pause & BIT(ring->qos_pc)))
rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -779,7 +775,7 @@ void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
u8 reg_idx = ring->reg_idx;
- if (!(rx_pause & (1 << ring->qos_pc)))
+ if (!(rx_pause & BIT(ring->qos_pc)))
rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -903,8 +899,8 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
/* re-enable mailbox interrupt and indicate 20us delay */
fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
- FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
- hw->mac.itr_scale));
+ (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
+ FM10K_ITR_ENABLE);
/* service upstream mailbox */
if (fm10k_mbx_trylock(interface)) {
@@ -1065,7 +1061,7 @@ static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
if (maxholdq)
fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
for (q = 255;;) {
- if (maxholdq & (1 << 31)) {
+ if (maxholdq & BIT(31)) {
if (q < FM10K_MAX_QUEUES_PF) {
interface->rx_overrun_pf++;
fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
@@ -1135,22 +1131,24 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
/* re-enable mailbox interrupt and indicate 20us delay */
fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
- FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
- hw->mac.itr_scale));
+ (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
+ FM10K_ITR_ENABLE);
return IRQ_HANDLED;
}
void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
- struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
struct fm10k_hw *hw = &interface->hw;
+ struct msix_entry *entry;
int itr_reg;
/* no mailbox IRQ to free if MSI-X is not enabled */
if (!interface->msix_entries)
return;
+ entry = &interface->msix_entries[FM10K_MBX_VECTOR];
+
/* disconnect the mailbox */
hw->mbx.ops.disconnect(hw, &hw->mbx);
@@ -1202,25 +1200,6 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
return 0;
}
-static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info __always_unused *mbx)
-{
- struct fm10k_intfc *interface;
- u64 timestamp;
- s32 err;
-
- err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP],
- &timestamp);
- if (err)
- return err;
-
- interface = container_of(hw, struct fm10k_intfc, hw);
-
- fm10k_ts_tx_hwtstamp(interface, 0, timestamp);
-
- return 0;
-}
-
/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info __always_unused *mbx)
@@ -1241,7 +1220,6 @@ static const struct fm10k_msg_data vf_mbx_data[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
- FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
@@ -1253,7 +1231,7 @@ static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
int err;
/* Use timer0 for interrupt moderation on the mailbox */
- u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry;
+ u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;
/* register mailbox handlers */
err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
@@ -1285,11 +1263,40 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
u32 dglort_map = hw->mac.dglort_map;
s32 err;
+ interface = container_of(hw, struct fm10k_intfc, hw);
+
+ err = fm10k_msg_err_pf(hw, results, mbx);
+ if (!err && hw->swapi.status) {
+ /* force link down for a reasonable delay */
+ interface->link_down_event = jiffies + (2 * HZ);
+ set_bit(__FM10K_LINK_DOWN, &interface->state);
+
+ /* reset dglort_map back to no config */
+ hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
+
+ fm10k_service_event_schedule(interface);
+
+ /* prevent overloading kernel message buffer */
+ if (interface->lport_map_failed)
+ return 0;
+
+ interface->lport_map_failed = true;
+
+ if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
+ dev_warn(&interface->pdev->dev,
+ "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
+ dev_warn(&interface->pdev->dev,
+ "request logical port map failed: %d\n",
+ hw->swapi.status);
+
+ return 0;
+ }
+
err = fm10k_msg_lport_map_pf(hw, results, mbx);
if (err)
return err;
- interface = container_of(hw, struct fm10k_intfc, hw);
+ interface->lport_map_failed = false;
/* we need to reset if port count was just updated */
if (dglort_map != hw->mac.dglort_map)
@@ -1339,68 +1346,6 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
return 0;
}
-static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info __always_unused *mbx)
-{
- struct fm10k_swapi_1588_timestamp timestamp;
- struct fm10k_iov_data *iov_data;
- struct fm10k_intfc *interface;
- u16 sglort, vf_idx;
- s32 err;
-
- err = fm10k_tlv_attr_get_le_struct(
- results[FM10K_PF_ATTR_ID_1588_TIMESTAMP],
- &timestamp, sizeof(timestamp));
- if (err)
- return err;
-
- interface = container_of(hw, struct fm10k_intfc, hw);
-
- if (timestamp.dglort) {
- fm10k_ts_tx_hwtstamp(interface, timestamp.dglort,
- le64_to_cpu(timestamp.egress));
- return 0;
- }
-
- /* either dglort or sglort must be set */
- if (!timestamp.sglort)
- return FM10K_ERR_PARAM;
-
- /* verify GLORT is at least one of the ones we own */
- sglort = le16_to_cpu(timestamp.sglort);
- if (!fm10k_glort_valid_pf(hw, sglort))
- return FM10K_ERR_PARAM;
-
- if (sglort == interface->glort) {
- fm10k_ts_tx_hwtstamp(interface, 0,
- le64_to_cpu(timestamp.ingress));
- return 0;
- }
-
- /* if there is no iov_data then there is no mailboxes to process */
- if (!ACCESS_ONCE(interface->iov_data))
- return FM10K_ERR_PARAM;
-
- rcu_read_lock();
-
- /* notify VF if this timestamp belongs to it */
- iov_data = interface->iov_data;
- vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort;
-
- if (!iov_data || vf_idx >= iov_data->num_vfs) {
- err = FM10K_ERR_PARAM;
- goto err_unlock;
- }
-
- err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx],
- le64_to_cpu(timestamp.ingress));
-
-err_unlock:
- rcu_read_unlock();
-
- return err;
-}
-
static const struct fm10k_msg_data pf_mbx_data[] = {
FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1408,7 +1353,6 @@ static const struct fm10k_msg_data pf_mbx_data[] = {
FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
- FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
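
The lport_map failure path added above latches interface->lport_map_failed so that a switch manager which keeps rejecting the request produces one warning per failure streak rather than one per mailbox message; a later success re-arms the warning. The latch pattern in isolation (a hedged sketch; 'request_failed' and the helper name are illustrative, lport_map_failed is the real flag):

	#include <linux/device.h>

	/* Warn once per failure streak, re-armed by the next success. */
	static int handle_lport_map_reply(struct device *dev,
					  struct fm10k_intfc *interface,
					  bool request_failed)
	{
		if (request_failed) {
			if (!interface->lport_map_failed) {
				interface->lport_map_failed = true;
				dev_warn(dev,
					 "request logical port map failed\n");
			}
			return 0;
		}

		interface->lport_map_failed = false;
		return 0;
	}
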
@@ -1420,8 +1364,8 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
int err;
/* Use timer0 for interrupt moderation on the mailbox */
- u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry;
- u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry;
+ u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
+ u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;
/* register mailbox handlers */
err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
@@ -1654,6 +1598,7 @@ void fm10k_down(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
+ int err;
/* signal that we are down to the interrupt handler and service task */
set_bit(__FM10K_DOWN, &interface->state);
@@ -1678,7 +1623,9 @@ void fm10k_down(struct fm10k_intfc *interface)
fm10k_update_stats(interface);
/* Disable DMA engine for Tx/Rx */
- hw->mac.ops.stop_hw(hw);
+ err = hw->mac.ops.stop_hw(hw);
+ if (err)
+ dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
/* free any buffers still on the rings */
fm10k_clean_all_tx_rings(interface);
@@ -1776,35 +1723,17 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ ether_addr_copy(netdev->perm_addr, hw->mac.addr);
if (!is_valid_ether_addr(netdev->perm_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
return -EIO;
}
- /* assign BAR 4 resources for use with PTP */
- if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED)
- interface->sw_addr = ioremap(pci_resource_start(pdev, 4),
- pci_resource_len(pdev, 4));
- hw->sw_addr = interface->sw_addr;
-
/* initialize DCBNL interface */
fm10k_dcbnl_set_ops(netdev);
- /* Initialize service timer and service task */
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
- setup_timer(&interface->service_timer, &fm10k_service_timer,
- (unsigned long)interface);
- INIT_WORK(&interface->service_task, fm10k_service_task);
-
- /* kick off service timer now, even when interface is down */
- mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
-
- /* Initialize timestamp data */
- fm10k_ts_init(interface);
-
/* set default ring sizes */
interface->tx_ring_count = FM10K_DEFAULT_TXD;
interface->rx_ring_count = FM10K_DEFAULT_RXD;
@@ -1987,6 +1916,12 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ /* the mbx interrupt might attempt to schedule the service task, so we
+ * must ensure it is disabled since we haven't yet requested the timer
+ * or work item.
+ */
+ set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+
err = fm10k_mbx_request_irq(interface);
if (err)
goto err_mbx_interrupt;
@@ -2006,8 +1941,15 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* stop all the transmit queues from transmitting until link is up */
netif_tx_stop_all_queues(netdev);
- /* Register PTP interface */
- fm10k_ptp_register(interface);
+ /* Initialize service timer and service task late in order to avoid
+ * cleanup issues.
+ */
+ setup_timer(&interface->service_timer, &fm10k_service_timer,
+ (unsigned long)interface);
+ INIT_WORK(&interface->service_task, fm10k_service_task);
+
+ /* kick off service timer now, even when interface is down */
+ mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
/* print warning for non-optimal configurations */
fm10k_slot_warn(interface);
@@ -2065,9 +2007,6 @@ static void fm10k_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- /* cleanup timestamp handling */
- fm10k_ptp_unregister(interface);
-
/* release VFs */
fm10k_iov_disable(pdev);
@@ -2140,9 +2079,6 @@ static int fm10k_resume(struct pci_dev *pdev)
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
- /* reset clock */
- fm10k_ts_reset(interface);
-
rtnl_lock();
err = fm10k_init_queueing_scheme(interface);
@@ -2259,15 +2195,17 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
+ rtnl_lock();
+
if (netif_running(netdev))
fm10k_close(netdev);
+ fm10k_mbx_free_irq(interface);
+
/* free interrupts */
fm10k_clear_queueing_scheme(interface);
- fm10k_mbx_free_irq(interface);
-
- pci_disable_device(pdev);
+ rtnl_unlock();
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -2337,27 +2275,29 @@ static void fm10k_io_resume(struct pci_dev *pdev)
/* reset statistics starting values */
hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+ rtnl_lock();
+
err = fm10k_init_queueing_scheme(interface);
if (err) {
dev_err(&interface->pdev->dev,
"init_queueing_scheme failed: %d\n", err);
- return;
+ goto unlock;
}
/* reassociate interrupts */
fm10k_mbx_request_irq(interface);
- /* reset clock */
- fm10k_ts_reset(interface);
-
if (netif_running(netdev))
err = fm10k_open(netdev);
/* final check of hardware state before registering the interface */
err = err ? : fm10k_hw_ready(interface);
if (!err)
netif_device_attach(netdev);
+
+unlock:
+ rtnl_unlock();
}
static const struct pci_error_handlers fm10k_err_handler = {
@@ -2382,7 +2324,7 @@ static struct pci_driver fm10k_driver = {
/**
* fm10k_register_pci_driver - register driver interface
*
- * This funciton is called on module load in order to register the driver.
+ * This function is called on module load in order to register the driver.
**/
int fm10k_register_pci_driver(void)
{
@@ -2392,7 +2334,7 @@ int fm10k_register_pci_driver(void)
/**
* fm10k_unregister_pci_driver - unregister driver interface
*
- * This funciton is called on module unload in order to remove the driver.
+ * This function is called on module unload in order to remove the driver.
**/
void fm10k_unregister_pci_driver(void)
{
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 8cf943db5..dc75507c9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -219,8 +219,8 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
/* VLAN multi-bit write:
* The multi-bit write has several parts to it.
- * 3 2 1 0
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * 24 16 8 0
+ * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | RSVD0 | Length |C|RSVD0| VLAN ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -488,6 +488,10 @@ static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
if (!fm10k_glort_valid_pf(hw, glort))
return FM10K_ERR_PARAM;
+ /* reset multicast mode if deleting lport */
+ if (!enable)
+ fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
+
/* construct the lport message from the 2 pieces of data we have */
lport_msg = ((u32)count << 16) | glort;
@@ -527,8 +531,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
return FM10K_ERR_PARAM;
/* determine count of VSIs and queues */
- queue_count = 1 << (dglort->rss_l + dglort->pc_l);
- vsi_count = 1 << (dglort->vsi_l + dglort->queue_l);
+ queue_count = BIT(dglort->rss_l + dglort->pc_l);
+ vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
glort = dglort->glort;
q_idx = dglort->queue_b;
@@ -544,8 +548,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
}
/* determine count of PCs and queues */
- queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l);
- pc_count = 1 << dglort->pc_l;
+ queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
+ pc_count = BIT(dglort->pc_l);
/* configure PC for Tx queues */
for (pc = 0; pc < pc_count; pc++) {
@@ -711,8 +715,8 @@ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
FM10K_RXDCTL_DROP_ON_EMPTY);
fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
- FM10K_RXQCTL_VF |
- (i << FM10K_RXQCTL_VF_SHIFT));
+ (i << FM10K_RXQCTL_VF_SHIFT) |
+ FM10K_RXQCTL_VF);
/* map queue pair to VF */
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
@@ -864,9 +868,13 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
- /* determine correct default VLAN ID */
+ /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
+ * used here to indicate to the VF that it will not have privilege to
+ * write VLAN_TABLE. All policy is enforced on the PF but this allows
+ * the VF to correctly report errors to userspace requests.
+ */
if (vf_info->pf_vid)
- vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
+ vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
else
vf_vid = vf_info->sw_vid;
@@ -952,7 +960,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
return FM10K_ERR_PARAM;
/* clear event notification of VF FLR */
- fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32));
+ fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
/* force timeout and then disconnect the mailbox */
vf_info->mbx.timeout = 0;
@@ -987,7 +995,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
(vf_idx << FM10K_TXQCTL_TC_SHIFT) |
FM10K_TXQCTL_VF | vf_idx;
- rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT);
+ rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
/* stop further DMA and reset queue ownership back to VF */
for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
@@ -1140,19 +1148,6 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
fm10k_update_hw_stats_q(hw, q, idx, qpp);
}
-static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
- struct fm10k_vf_info *vf_info,
- u64 timestamp)
-{
- u32 msg[4];
-
- /* generate port state response to notify VF it is not ready */
- fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
- fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp);
-
- return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
-}
-
/**
* fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
* @hw: Pointer to hardware structure
@@ -1384,7 +1379,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
/* if mode is not currently enabled, enable it */
- if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode)))
+ if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
/* swap mode back to a bit flag */
@@ -1618,7 +1613,7 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
* @hw: pointer to hardware structure
* @switch_ready: pointer to boolean value that will record switch state
*
- * This funciton will check the DMA_CTRL2 register and mailbox in order
+ * This function will check the DMA_CTRL2 register and mailbox in order
* to determine if the switch is ready for the PF to begin requesting
* addresses and mapping traffic to the local interface.
**/
@@ -1647,6 +1642,8 @@ out:
/* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
FM10K_TLV_ATTR_LAST
};
@@ -1787,89 +1784,6 @@ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
return 0;
}
-const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
- FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
- sizeof(struct fm10k_swapi_1588_timestamp)),
- FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
-/**
- * fm10k_adjust_systime_pf - Adjust systime frequency
- * @hw: pointer to hardware structure
- * @ppb: adjustment rate in parts per billion
- *
- * This function will adjust the SYSTIME_CFG register contained in BAR 4
- * if this function is supported for BAR 4 access. The adjustment amount
- * is based on the parts per billion value provided and adjusted to a
- * value based on parts per 2^48 clock cycles.
- *
- * If adjustment is not supported or the requested value is too large
- * we will return an error.
- **/
-static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
-{
- u64 systime_adjust;
-
- /* if sw_addr is not set we don't have switch register access */
- if (!hw->sw_addr)
- return ppb ? FM10K_ERR_PARAM : 0;
-
- /* we must convert the value from parts per billion to parts per
- * 2^48 cycles. In addition I have opted to only use the 30 most
- * significant bits of the adjustment value as the 8 least
- * significant bits are located in another register and represent
- * a value significantly less than a part per billion, the result
- * of dropping the 8 least significant bits is that the adjustment
- * value is effectively multiplied by 2^8 when we write it.
- *
- * As a result of all this the math for this breaks down as follows:
- * ppb / 10^9 == adjust * 2^8 / 2^48
- * If we solve this for adjust, and simplify it comes out as:
- * ppb * 2^31 / 5^9 == adjust
- */
- systime_adjust = (ppb < 0) ? -ppb : ppb;
- systime_adjust <<= 31;
- do_div(systime_adjust, 1953125);
-
- /* verify the requested adjustment value is in range */
- if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
- return FM10K_ERR_PARAM;
-
- if (ppb > 0)
- systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
-
- fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
-
- return 0;
-}
-
-/**
- * fm10k_read_systime_pf - Reads value of systime registers
- * @hw: pointer to the hardware structure
- *
- * Function reads the content of 2 registers, combined to represent a 64 bit
- * value measured in nanoseconds. In order to guarantee the value is accurate
- * we check the 32 most significant bits both before and after reading the
- * 32 least significant bits to verify they didn't change as we were reading
- * the registers.
- **/
-static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
-{
- u32 systime_l, systime_h, systime_tmp;
-
- systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
-
- do {
- systime_tmp = systime_h;
- systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
- systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
- } while (systime_tmp != systime_h);
-
- return ((u64)systime_h << 32) | systime_l;
-}
-
static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1899,8 +1813,6 @@ static const struct fm10k_mac_ops mac_ops_pf = {
.set_dma_mask = fm10k_set_dma_mask_pf,
.get_fault = fm10k_get_fault_pf,
.get_host_state = fm10k_get_host_state_pf,
- .adjust_systime = fm10k_adjust_systime_pf,
- .read_systime = fm10k_read_systime_pf,
};
static const struct fm10k_iov_ops iov_ops_pf = {
@@ -1912,7 +1824,6 @@ static const struct fm10k_iov_ops iov_ops_pf = {
.set_lport = fm10k_iov_set_lport_pf,
.reset_lport = fm10k_iov_reset_lport_pf,
.update_stats = fm10k_iov_update_stats_pf,
- .report_timestamp = fm10k_iov_report_timestamp_pf,
};
static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
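
The FM10K_PFVFLREC write above uses the standard split of a flat VF index into a 32-bit register word plus a bit within it: vf_idx 37, for example, lands in word 37 / 32 = 1 as BIT(37 % 32) = BIT(5) = 0x20. The generic shape of the pattern (hypothetical helper, not fm10k code):

	#include <linux/bitops.h>
	#include <linux/io.h>

	/* Acknowledge a VF function-level reset in a register array that
	 * packs 32 VFs per 32-bit word.
	 */
	static inline void ack_vf_flr(u32 __iomem *pfvflrec,
				      unsigned int vf_idx)
	{
		writel(BIT(vf_idx % 32), &pfvflrec[vf_idx / 32]);
	}
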
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index b2d96b45c..3336d3c10 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -42,8 +42,6 @@ enum fm10k_pf_tlv_msg_id_v1 {
FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503,
FM10K_PF_MSG_ID_DELETE_FLOW = 0x504,
FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505,
- FM10K_PF_MSG_ID_GET_1588_INFO = 0x506,
- FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701,
};
enum fm10k_pf_tlv_attr_id_v1 {
@@ -61,7 +59,6 @@ enum fm10k_pf_tlv_attr_id_v1 {
FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B,
FM10K_PF_ATTR_ID_PORT = 0x0C,
FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D,
- FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10,
};
#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0
@@ -74,6 +71,8 @@ enum fm10k_pf_tlv_attr_id_v1 {
#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
+#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280
+
/* The following data structures are overlayed directly onto TLV mailbox
* messages, and must not break 4 byte alignment. Ensure the structures line
* up correctly as per their TLV definition.
@@ -100,13 +99,6 @@ struct fm10k_swapi_error {
struct fm10k_global_table_data ffu;
} __aligned(4) __packed;
-struct fm10k_swapi_1588_timestamp {
- __le64 egress;
- __le64 ingress;
- __le16 dglort;
- __le16 sglort;
-} __aligned(4) __packed;
-
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
@@ -122,11 +114,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
-extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[];
-#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
- FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
- fm10k_1588_timestamp_msg_attr, func)
-
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
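
The overlay structures in this header, such as fm10k_swapi_error above, are mapped byte-for-byte onto TLV mailbox payloads, which is why every field is a fixed-width little-endian type and why __aligned(4) and __packed appear together. A compliant shape, using hypothetical fields for illustration:

	#include <linux/types.h>

	/* Hypothetical TLV overlay: fixed-width __le fields, packed so
	 * the compiler inserts no padding, 4-byte aligned overall as the
	 * mailbox format requires.
	 */
	struct example_swapi_status {
		__le32 status;
		__le16 glort;
		__le16 flags;
	} __aligned(4) __packed;
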
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
deleted file mode 100644
index b4945e8ab..000000000
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
-
-#include <linux/ptp_classify.h>
-#include <linux/ptp_clock_kernel.h>
-
-#include "fm10k.h"
-
-#define FM10K_TS_TX_TIMEOUT (HZ * 15)
-
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
- struct skb_shared_hwtstamps *hwtstamp,
- u64 systime)
-{
- unsigned long flags;
-
- read_lock_irqsave(&interface->systime_lock, flags);
- systime += interface->ptp_adjust;
- read_unlock_irqrestore(&interface->systime_lock, flags);
-
- hwtstamp->hwtstamp = ns_to_ktime(systime);
-}
-
-static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface,
- __le16 dglort)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb;
-
- skb_queue_walk(list, skb) {
- if (FM10K_CB(skb)->fi.w.dglort == dglort)
- return skb;
- }
-
- return NULL;
-}
-
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *clone;
- unsigned long flags;
-
- /* create clone for us to return on the Tx path */
- clone = skb_clone_sk(skb);
- if (!clone)
- return;
-
- FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
- spin_lock_irqsave(&list->lock, flags);
-
- /* attempt to locate any buffers with the same dglort,
- * if none are present then insert skb in tail of list
- */
- skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
- if (!skb) {
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- __skb_queue_tail(list, clone);
- }
-
- spin_unlock_irqrestore(&list->lock, flags);
-
- /* if the list already has one then we just free the clone */
- if (skb)
- dev_kfree_skb(clone);
-}
-
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
- u64 systime)
-{
- struct skb_shared_hwtstamps shhwtstamps;
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb;
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
-
- /* attempt to locate and pull the sk_buff out of the list */
- skb = fm10k_ts_tx_skb(interface, dglort);
- if (skb)
- __skb_unlink(skb, list);
-
- spin_unlock_irqrestore(&list->lock, flags);
-
- /* if not found do nothing */
- if (!skb)
- return;
-
- /* timestamp the sk_buff and free our copy */
- fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
-}
-
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb, *tmp;
- unsigned long flags;
-
- /* If we're down or resetting, just bail */
- if (test_bit(__FM10K_DOWN, &interface->state) ||
- test_bit(__FM10K_RESETTING, &interface->state))
- return;
-
- spin_lock_irqsave(&list->lock, flags);
-
- /* walk through the list and flush any expired timestamp packets */
- skb_queue_walk_safe(list, skb, tmp) {
- if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout))
- continue;
- __skb_unlink(skb, list);
- kfree_skb(skb);
- interface->tx_hwtstamp_timeouts++;
- }
-
- spin_unlock_irqrestore(&list->lock, flags);
-}
-
-static u64 fm10k_systime_read(struct fm10k_intfc *interface)
-{
- struct fm10k_hw *hw = &interface->hw;
-
- return hw->mac.ops.read_systime(hw);
-}
-
-void fm10k_ts_reset(struct fm10k_intfc *interface)
-{
- s64 ns = ktime_to_ns(ktime_get_real());
- unsigned long flags;
-
- /* reinitialize the clock */
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust = fm10k_systime_read(interface) - ns;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-}
-
-void fm10k_ts_init(struct fm10k_intfc *interface)
-{
- /* Initialize lock protecting systime access */
- rwlock_init(&interface->systime_lock);
-
- /* Initialize skb queue for pending timestamp requests */
- skb_queue_head_init(&interface->ts_tx_skb_queue);
-
- /* reset the clock to current kernel time */
- fm10k_ts_reset(interface);
-}
-
-/**
- * fm10k_get_ts_config - get current hardware timestamping configuration
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * This function returns the current timestamping settings. Rather than
- * attempt to deconstruct registers to fill in the values, simply keep a copy
- * of the old settings around, and return a copy when requested.
- */
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct hwtstamp_config *config = &interface->ts_config;
-
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
-}
-
-/**
- * fm10k_set_ts_config - control hardware time stamping
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't cause any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- * Since hardware always timestamps Path delay packets when timestamping V2
- * packets, regardless of the type specified in the register, only use V2
- * Event mode. This more accurately tells the user what the hardware is going
- * to do anyways.
- */
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct hwtstamp_config ts_config;
-
- if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (ts_config.flags)
- return -EINVAL;
-
- switch (ts_config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
- case HWTSTAMP_TX_ON:
- /* we likely need some check here to see if this is supported */
- break;
- default:
- return -ERANGE;
- }
-
- switch (ts_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_ALL:
- interface->flags |= FM10K_FLAG_RX_TS_ENABLED;
- ts_config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- return -ERANGE;
- }
-
- /* save these settings for future reference */
- interface->ts_config = ts_config;
-
- return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ?
- -EFAULT : 0;
-}
-
-static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-{
- struct fm10k_intfc *interface;
- struct fm10k_hw *hw;
- int err;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
- hw = &interface->hw;
-
- err = hw->mac.ops.adjust_systime(hw, ppb);
-
- /* the only error we should see is if the value is out of range */
- return (err == FM10K_ERR_PARAM) ? -ERANGE : err;
-}
-
-static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust += delta;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-
- return 0;
-}
-
-static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
- u64 now;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- read_lock_irqsave(&interface->systime_lock, flags);
- now = fm10k_systime_read(interface) + interface->ptp_adjust;
- read_unlock_irqrestore(&interface->systime_lock, flags);
-
- *ts = ns_to_timespec64(now);
-
- return 0;
-}
-
-static int fm10k_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
- u64 ns = timespec64_to_ns(ts);
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust = fm10k_systime_read(interface) - ns;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-
- return 0;
-}
-
-static int fm10k_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int __always_unused on)
-{
- struct ptp_clock_time *t = &rq->perout.period;
- struct fm10k_intfc *interface;
- struct fm10k_hw *hw;
- u64 period;
- u32 step;
-
- /* we can only support periodic output */
- if (rq->type != PTP_CLK_REQ_PEROUT)
- return -EINVAL;
-
- /* verify the requested channel is there */
- if (rq->perout.index >= ptp->n_per_out)
- return -EINVAL;
-
- /* we cannot enforce start time as there is no
- * mechanism for that in the hardware, we can only control
- * the period.
- */
-
- /* we cannot support periods greater than 4 seconds due to reg limit */
- if (t->sec > 4 || t->sec < 0)
- return -ERANGE;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
- hw = &interface->hw;
-
- /* we simply cannot support the operation if we don't have BAR4 */
- if (!hw->sw_addr)
- return -ENOTSUPP;
-
- /* convert to unsigned 64b ns, verify we can put it in a 32b register */
- period = t->sec * 1000000000LL + t->nsec;
-
- /* determine the minimum size for period */
- step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) &
- FM10K_SYSTIME_CFG_STEP_MASK);
-
- /* verify the value is in range supported by hardware */
- if ((period && (period < step)) || (period > U32_MAX))
- return -ERANGE;
-
- /* notify hardware of request to begin sending pulses */
- fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index),
- (u32)period);
-
- return 0;
-}
-
-static struct ptp_pin_desc fm10k_ptp_pd[2] = {
- {
- .name = "IEEE1588_PULSE0",
- .index = 0,
- .func = PTP_PF_PEROUT,
- .chan = 0
- },
- {
- .name = "IEEE1588_PULSE1",
- .index = 1,
- .func = PTP_PF_PEROUT,
- .chan = 1
- }
-};
-
-static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
- enum ptp_pin_function func, unsigned int chan)
-{
- /* verify the requested pin is there */
- if (pin >= ptp->n_pins || !ptp->pin_config)
- return -EINVAL;
-
- /* enforce locked channels, no changing them */
- if (chan != ptp->pin_config[pin].chan)
- return -EINVAL;
-
- /* we want to keep the functions locked as well */
- if (func != ptp->pin_config[pin].func)
- return -EINVAL;
-
- return 0;
-}
-
-void fm10k_ptp_register(struct fm10k_intfc *interface)
-{
- struct ptp_clock_info *ptp_caps = &interface->ptp_caps;
- struct device *dev = &interface->pdev->dev;
- struct ptp_clock *ptp_clock;
-
- snprintf(ptp_caps->name, sizeof(ptp_caps->name),
- "%s", interface->netdev->name);
- ptp_caps->owner = THIS_MODULE;
- /* This math is simply the inverse of the math in
- * fm10k_adjust_systime_pf applied to an adjustment value
- * of 2^30 - 1 which is the maximum value of the register:
- * max_ppb == ((2^30 - 1) * 5^9) / 2^31
- */
- ptp_caps->max_adj = 976562;
- ptp_caps->adjfreq = fm10k_ptp_adjfreq;
- ptp_caps->adjtime = fm10k_ptp_adjtime;
- ptp_caps->gettime64 = fm10k_ptp_gettime;
- ptp_caps->settime64 = fm10k_ptp_settime;
-
- /* provide pins if BAR4 is accessible */
- if (interface->sw_addr) {
- /* enable periodic outputs */
- ptp_caps->n_per_out = 2;
- ptp_caps->enable = fm10k_ptp_enable;
-
- /* enable clock pins */
- ptp_caps->verify = fm10k_ptp_verify;
- ptp_caps->n_pins = 2;
- ptp_caps->pin_config = fm10k_ptp_pd;
- }
-
- ptp_clock = ptp_clock_register(ptp_caps, dev);
- if (IS_ERR(ptp_clock)) {
- ptp_clock = NULL;
- dev_err(dev, "ptp_clock_register failed\n");
- } else {
- dev_info(dev, "registered PHC device %s\n", ptp_caps->name);
- }
-
- interface->ptp_clock = ptp_clock;
-}
-
-void fm10k_ptp_unregister(struct fm10k_intfc *interface)
-{
- struct ptp_clock *ptp_clock = interface->ptp_clock;
- struct device *dev = &interface->pdev->dev;
-
- if (!ptp_clock)
- return;
-
- interface->ptp_clock = NULL;
-
- ptp_clock_unregister(ptp_clock);
- dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name);
-}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index ab01bb307..f8e87bf08 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -222,7 +222,7 @@ s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len)
attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
if (len < 4) {
- attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1);
+ attr[1] = (u32)value & (BIT(8 * len) - 1);
} else {
attr[1] = (u32)value;
if (len > 4)
@@ -481,7 +481,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr,
* up into an array of pointers stored in results. The function will
* return FM10K_ERR_PARAM on any input or message error,
* FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
- * and 0 on success.
+ * and 0 on success. Any attributes not found in tlv_attr will be silently
+ * ignored.
**/
static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
@@ -518,14 +519,15 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
while (offset < len) {
attr_id = *attr & FM10K_TLV_ID_MASK;
- if (attr_id < FM10K_TLV_RESULTS_MAX)
- err = fm10k_tlv_attr_validate(attr, tlv_attr);
- else
- err = FM10K_NOT_IMPLEMENTED;
+ if (attr_id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_NOT_IMPLEMENTED;
- if (err < 0)
+ err = fm10k_tlv_attr_validate(attr, tlv_attr);
+ if (err == FM10K_NOT_IMPLEMENTED)
+ ; /* silently ignore non-implemented attributes */
+ else if (err)
return err;
- if (!err)
+ else
results[attr_id] = attr;
/* update offset */
@@ -652,29 +654,29 @@ const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = {
**/
static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
{
- if (attr_flags & (1 << FM10K_TEST_MSG_STRING))
+ if (attr_flags & BIT(FM10K_TEST_MSG_STRING))
fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
test_str);
- if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR))
+ if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR))
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
test_mac, test_vlan);
- if (attr_flags & (1 << FM10K_TEST_MSG_U8))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U8))
fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8);
- if (attr_flags & (1 << FM10K_TEST_MSG_U16))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U16))
fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
- if (attr_flags & (1 << FM10K_TEST_MSG_U32))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U32))
fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
- if (attr_flags & (1 << FM10K_TEST_MSG_U64))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U64))
fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
- if (attr_flags & (1 << FM10K_TEST_MSG_S8))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S8))
fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8);
- if (attr_flags & (1 << FM10K_TEST_MSG_S16))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S16))
fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
- if (attr_flags & (1 << FM10K_TEST_MSG_S32))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S32))
fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
- if (attr_flags & (1 << FM10K_TEST_MSG_S64))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S64))
fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
- if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT))
+ if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT))
fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
test_le, 8);
}
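[Editor's note] The replacements above swap open-coded shifts for the kernel's BIT() macro (from linux/bitops.h in this era); in fm10k_tlv_attr_put_value() the expression BIT(8 * len) - 1 builds a mask covering the low `len` bytes. A small userspace sketch with a stand-in macro:

#include <stdio.h>

/* userspace stand-in for the kernel's BIT() macro */
#define BIT(nr) (1UL << (nr))

int main(void)
{
	/* len = 1..3 gives 0xff, 0xffff, 0xffffff -- the masks used
	 * to truncate a value to its attribute length
	 */
	for (unsigned int len = 1; len < 4; len++)
		printf("len=%u mask=0x%lx\n", len, BIT(8 * len) - 1);
	return 0;
}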
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index e1845e0a1..a1f1027fe 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 854ebb190..b8bc06183 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -225,11 +225,6 @@ struct fm10k_hw;
#define FM10K_STATS_LOOPBACK_DROP 0x3806
#define FM10K_STATS_NODESC_DROP 0x3807
-/* Timesync registers */
-#define FM10K_SYSTIME 0x3814
-#define FM10K_SYSTIME_CFG 0x3818
-#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F
-
/* PCIe state registers */
#define FM10K_PHYADDR 0x381C
@@ -355,6 +350,7 @@ struct fm10k_hw;
#define FM10K_VLAN_TABLE_VSI_MAX 64
#define FM10K_VLAN_LENGTH_SHIFT 16
#define FM10K_VLAN_CLEAR BIT(15)
+#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR
#define FM10K_VLAN_ALL \
((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
@@ -381,12 +377,6 @@ struct fm10k_hw;
#define FM10K_VFSYSTIME 0x00040
#define FM10K_VFITR(_n) ((_n) + 0x00060)
-/* Registers contained in BAR 4 for Switch management */
-#define FM10K_SW_SYSTIME_ADJUST 0x0224D
-#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
-#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000
-#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
-
enum fm10k_int_source {
fm10k_int_mailbox = 0,
fm10k_int_pcie_fault = 1,
@@ -550,8 +540,6 @@ struct fm10k_mac_ops {
struct fm10k_dglort_cfg *);
void (*set_dma_mask)(struct fm10k_hw *, u64);
s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
- s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
- u64 (*read_systime)(struct fm10k_hw *);
};
enum fm10k_mac_type {
@@ -617,10 +605,10 @@ struct fm10k_vf_info {
*/
};
-#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI)
-#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI)
-#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC)
-#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE)
+#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI))
+#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI))
+#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC))
+#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE))
#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF)
#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4)
#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode))
@@ -643,7 +631,6 @@ struct fm10k_iov_ops {
s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8);
void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *);
void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16);
- s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64);
};
struct fm10k_iov_info {
@@ -667,7 +654,6 @@ struct fm10k_info {
struct fm10k_hw {
u32 __iomem *hw_addr;
- u32 __iomem *sw_addr;
void *back;
struct fm10k_mac_info mac;
struct fm10k_bus_info bus;
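[Editor's note] The reworked VF capability macros above pack everything into one byte: the low nibble carries per-mode "capable" bits (BIT(mode)) and the high nibble the currently enabled xcast mode (FM10K_VF_FLAG_SET_MODE shifts 0x10). An illustrative reduction of that encoding:

#include <stdint.h>

#define BIT(nr) (1U << (nr))

static int vf_mode_capable(uint8_t vf_flags, unsigned int mode)
{
	/* same shape as FM10K_VF_FLAG_CAPABLE() & the per-mode bit */
	return (vf_flags & 0x0F) & BIT(mode);
}

static uint8_t vf_flag_set_mode(unsigned int mode)
{
	/* same shape as FM10K_VF_FLAG_SET_MODE() */
	return (uint8_t)(0x10 << mode);
}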
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 91f8d7311..3b06685ea 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -188,7 +188,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
if (vsi)
return FM10K_ERR_PARAM;
- /* verify upper 4 bits of vid and length are 0 */
+	/* verify the upper 4 reserved bits of both vid and length in one check */
if ((vid << 16 | vid) >> 28)
return FM10K_ERR_PARAM;
@@ -228,7 +228,7 @@ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
ether_addr_copy(hw->mac.perm_addr, perm_addr);
hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
- hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR);
+ hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);
return 0;
}
@@ -451,13 +451,6 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
return mbx->ops.enqueue_tx(hw, mbx, msg);
}
-const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
- FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP),
- FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
/**
* fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
* @hw: pointer to hardware structure
@@ -509,52 +502,6 @@ static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
return 0;
}
-/**
- * fm10k_adjust_systime_vf - Adjust systime frequency
- * @hw: pointer to hardware structure
- * @ppb: adjustment rate in parts per billion
- *
- * This function takes an adjustment rate in parts per billion and will
- * verify that this value is 0 as the VF cannot support adjusting the
- * systime clock.
- *
- * If the ppb value is non-zero the return is ERR_PARAM else success
- **/
-static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
-{
- /* The VF cannot adjust the clock frequency, however it should
- * already have a syntonic clock with whichever host interface is
- * running as the master for the host interface clock domain so
- * there should be not frequency adjustment necessary.
- */
- return ppb ? FM10K_ERR_PARAM : 0;
-}
-
-/**
- * fm10k_read_systime_vf - Reads value of systime registers
- * @hw: pointer to the hardware structure
- *
- * Function reads the content of 2 registers, combined to represent a 64 bit
- * value measured in nanoseconds. In order to guarantee the value is accurate
- * we check the 32 most significant bits both before and after reading the
- * 32 least significant bits to verify they didn't change as we were reading
- * the registers.
- **/
-static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
-{
- u32 systime_l, systime_h, systime_tmp;
-
- systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
-
- do {
- systime_tmp = systime_h;
- systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
- systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
- } while (systime_tmp != systime_h);
-
- return ((u64)systime_h << 32) | systime_l;
-}
-
static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
@@ -579,8 +526,6 @@ static const struct fm10k_mac_ops mac_ops_vf = {
.rebind_hw_stats = fm10k_rebind_hw_stats_vf,
.configure_dglort_map = fm10k_configure_dglort_map_vf,
.get_host_state = fm10k_get_host_state_generic,
- .adjust_systime = fm10k_adjust_systime_vf,
- .read_systime = fm10k_read_systime_vf,
};
static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
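[Editor's note] The deleted fm10k_read_systime_vf() used the classic hi/lo/hi pattern for sampling a 64-bit counter exposed as two 32-bit registers. A hedged generic sketch, with read_reg() standing in for fm10k_read_reg():

#include <stdint.h>

/* stand-in for the driver's register accessor */
extern uint32_t read_reg(unsigned int offset);

/* Re-read the high word until it is stable, so the combined value is
 * consistent even if the low word wraps between the two reads.
 */
static uint64_t read_split_counter(unsigned int lo, unsigned int hi)
{
	uint32_t h, l, tmp;

	h = read_reg(hi);
	do {
		tmp = h;
		l = read_reg(lo);
		h = read_reg(hi);
	} while (tmp != h);

	return ((uint64_t)h << 32) | l;
}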
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
index c4439f131..2662f33c0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,6 @@ enum fm10k_vf_tlv_msg_id {
FM10K_VF_MSG_ID_MSIX,
FM10K_VF_MSG_ID_MAC_VLAN,
FM10K_VF_MSG_ID_LPORT_STATE,
- FM10K_VF_MSG_ID_1588,
FM10K_VF_MSG_ID_MAX,
};
@@ -49,11 +48,6 @@ enum fm10k_tlv_lport_state_attr_id {
FM10K_LPORT_STATE_MSG_MAX
};
-enum fm10k_tlv_1588_attr_id {
- FM10K_1588_MSG_TIMESTAMP,
- FM10K_1588_MSG_MAX
-};
-
#define FM10K_VF_MSG_MSIX_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func)
@@ -70,9 +64,5 @@ extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[];
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \
fm10k_lport_state_msg_attr, func)
-extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
-#define FM10K_VF_MSG_1588_HANDLER(func) \
- FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
-
extern const struct fm10k_info fm10k_vf_info;
#endif /* _FM10K_VF_H */
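[Editor's note] The FM10K_*_HANDLER macros in this header pair a message id with an attribute schema and a handler function; dropping the 1588 handler simply removes one row from that table. An illustrative sketch of the dispatch shape, with stand-in types rather than the driver's:

#include <stddef.h>
#include <stdint.h>

typedef int (*msg_handler)(void *hw, uint32_t **results);

struct msg_data {
	unsigned int id;
	const void *attr;	/* attribute schema for validation */
	msg_handler func;
};

static int dispatch(const struct msg_data *tbl, size_t n,
		    unsigned int id, void *hw, uint32_t **results)
{
	for (size_t i = 0; i < n; i++)
		if (tbl[i].id == id)
			return tbl[i].func(hw, results);
	return -1;	/* unknown message */
}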
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ce6e9c04..9c44739da 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -97,12 +97,12 @@
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
-#define I40E_PRIV_FLAGS_PS BIT(4)
-#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5)
+#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
+#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4)
+#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -112,7 +112,9 @@
#define I40E_OEM_VER_PATCH_MASK 0xff
#define I40E_OEM_VER_BUILD_SHIFT 8
#define I40E_OEM_VER_SHIFT 24
-#define I40E_PHY_DEBUG_PORT BIT(4)
+#define I40E_PHY_DEBUG_ALL \
+ (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
+ I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
/* The values in here are decimal coded as hex as is the case in the NVM map*/
#define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -123,10 +125,7 @@
#define XSTRINGIFY(bar) STRINGIFY(bar)
#define I40E_RX_DESC(R, i) \
- ((ring_is_16byte_desc_enabled(R)) \
- ? (union i40e_32byte_rx_desc *) \
- (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
- : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+ (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
@@ -202,6 +201,7 @@ struct i40e_lump_tracking {
#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
@@ -244,7 +244,6 @@ struct i40e_fdir_filter {
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
-#define I40E_MAX_USER_PRIORITY 8
/* DCB per TC information data structure */
struct i40e_tc_info {
u16 qoffset; /* Queue offset from base queue */
@@ -320,8 +319,6 @@ struct i40e_pf {
#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
-#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4)
-#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5)
#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
@@ -330,7 +327,6 @@ struct i40e_pf {
#ifdef I40E_FCOE
#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
@@ -363,6 +359,7 @@ struct i40e_pf {
#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47)
#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48)
#define I40E_FLAG_PF_MAC BIT_ULL(50)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -534,9 +531,7 @@ struct i40e_vsi {
u8 *rss_lut_user; /* User configured lookup table entries */
u16 max_frame;
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
/* List of q_vectors allocated to this VSI */
struct i40e_q_vector **q_vectors;
@@ -554,7 +549,7 @@ struct i40e_vsi {
u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
- u16 vf_id; /* Virtual function ID for SRIOV VSIs */
+ s16 vf_id; /* Virtual function ID for SRIOV VSIs */
struct i40e_tc_configuration tc_config;
struct i40e_aqc_vsi_properties_data info;
@@ -811,6 +806,7 @@ int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
#endif
int i40e_open(struct net_device *netdev);
+int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -823,7 +819,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
#ifdef I40E_FCOE
-int i40e_close(struct net_device *netdev);
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc);
void i40e_netpoll(struct net_device *netdev);
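[Editor's note] With packet split and 16-byte descriptors gone, I40E_RX_DESC() collapses to a single cast-and-index over the ring's DMA buffer. A self-contained sketch of the idiom; the descriptor layout is illustrative, not the real union i40e_32byte_rx_desc:

#include <stdint.h>

union rx_desc32 {
	struct {
		uint64_t pkt_addr;
		uint64_t hdr_addr;
		uint64_t rsvd[2];
	} read;
};

struct ring {
	void *desc;		/* untyped DMA descriptor memory */
};

/* same shape as the simplified I40E_RX_DESC(): cast once, index */
#define RX_DESC(R, i) (&(((union rx_desc32 *)((R)->desc))[i]))

static uint64_t rx_pkt_addr(struct ring *r, int i)
{
	return RX_DESC(r, i)->read.pkt_addr;
}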
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index df8e2fd6a..738b42a44 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,16 +33,6 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
- return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
- (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
-}
-
-/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -624,13 +614,9 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
- hw->aq.nvm_release_on_done = false;
+ hw->nvm_release_on_done = false;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- ret_code = i40e_aq_set_hmc_resource_profile(hw,
- I40E_HMC_PROFILE_DEFAULT,
- 0,
- NULL);
ret_code = 0;
/* success! */
@@ -1023,26 +1009,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- if (i40e_is_nvm_update_op(&e->desc)) {
- if (hw->aq.nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->aq.nvm_release_on_done = false;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
- }
-
+ i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending)
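[Editor's note] i40e_clean_arq_element() now hands the NVM-update bookkeeping to i40e_nvmupd_check_wait_event() instead of open-coding it. The transitions it replaces collapse the *_WAIT states back to their active counterparts once the awaited opcode completes; a hedged sketch (state names mirror I40E_NVMUPD_STATE_*, the function itself is illustrative):

enum nvmupd_state {
	STATE_INIT,
	STATE_INIT_WAIT,
	STATE_WRITING,
	STATE_WRITE_WAIT,
};

static enum nvmupd_state nvmupd_step(enum nvmupd_state s)
{
	switch (s) {
	case STATE_INIT_WAIT:
		return STATE_INIT;
	case STATE_WRITE_WAIT:
		return STATE_WRITING;
	default:
		return s;	/* not waiting: nothing to do */
	}
}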
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 12fbbddea..d92aad38a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -97,7 +97,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 8d5c65ab6..11cf1a5eb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -78,17 +78,17 @@ struct i40e_aq_desc {
#define I40E_AQ_FLAG_EI_SHIFT 14
#define I40E_AQ_FLAG_FE_SHIFT 15
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
@@ -429,6 +425,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -1585,27 +1582,6 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1652,11 +1628,11 @@ enum i40e_aq_phy_type {
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
};
struct i40e_aqc_module_desc {
@@ -1857,7 +1833,10 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disable link manageability on all ports */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
u8 reserved[15];
};
@@ -1927,9 +1906,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+ BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
@@ -2226,13 +2205,11 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
*/
struct i40e_aqc_lldp_set_local_mib {
#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
- SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
- SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \
+ BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
u8 type;
u8 reserved0;
@@ -2250,7 +2227,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
struct i40e_aqc_lldp_stop_start_specific_agent {
#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
- (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+ BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
u8 command;
u8 reserved[15];
};
@@ -2303,7 +2280,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2323,14 +2300,13 @@ struct i40e_aqc_get_set_rss_key_data {
I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
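[Editor's note] The RSS key and LUT commands pack a 10-bit VSI id plus a valid flag into one 16-bit field, which is what the VSI_VALID/VSI_ID masks above encode. An illustrative packer, omitting the cpu_to_le16() byte swap:

#include <stdint.h>

#define BIT(nr) (1U << (nr))

static uint16_t pack_rss_vsi(uint16_t vsi_id)
{
	/* 10-bit id in the low bits, "VSI valid" flag in bit 15 */
	return (uint16_t)((vsi_id & 0x3FF) | BIT(15));
}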
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index bf6b453d9..a4601d97f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -217,7 +217,7 @@ struct i40e_client {
#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
enum i40e_client_type type;
- struct i40e_client_ops *ops; /* client ops provided by the client */
+ const struct i40e_client_ops *ops; /* client ops provided by the client */
};
static inline bool i40e_client_is_registered(struct i40e_client *client)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 4596294c2..422b41d61 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -60,6 +60,8 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
default:
@@ -694,7 +696,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -1901,13 +1903,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
*
* Reset the external PHY.
**/
-enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details)
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_debug *cmd =
(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
- enum i40e_status_code status;
+ i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
@@ -1970,10 +1972,12 @@ aq_add_vsi_exit:
* @seid: vsi number
* @set: set unicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -1986,8 +1990,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
+ if (rx_only_promisc &&
+ (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1)))
flags |= I40E_AQC_SET_VSI_PROMISC_TX;
}
@@ -2037,6 +2042,76 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2157,6 +2232,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -2168,6 +2246,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
return status;
}
@@ -2205,6 +2286,35 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_switch_config
+ * @hw: pointer to the hardware structure
+ * @flags: bit flag values to set
+ * @valid_flags: which bit flags to set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set switch configuration bits
+ **/
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_switch_config *scfg =
+ (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_switch_config);
+ scfg->flags = cpu_to_le16(flags);
+ scfg->valid_flags = cpu_to_le16(valid_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_get_firmware_version
* @hw: pointer to the hw struct
* @fw_major_version: firmware major version
@@ -2660,10 +2770,7 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 *rules_used, u16 *rules_free)
{
/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- if (!rule_id)
- return I40E_ERR_PARAM;
- } else {
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
/* count and mr_list shall be valid for rule_type INGRESS VLAN
* mirroring. For other rule_type, count and rule_type should
* not matter.
@@ -2780,36 +2887,6 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_set_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * set the HMC profile of the device.
- **/
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_get_set_hmc_resource_profile *cmd =
- (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_hmc_resource_profile);
-
- cmd->pm_profile = (u8)profile;
- cmd->pe_vf_enabled = pe_vf_enabled_count;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_request_resource
* @hw: pointer to the hw struct
* @resource: resource id
@@ -3073,6 +3150,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MSIX:
p->num_msix_vectors = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX vector count = %d\n",
+ p->num_msix_vectors);
break;
case I40E_AQ_CAP_ID_VF_MSIX:
p->num_msix_vectors_vf = number;
@@ -3128,6 +3208,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
break;
+ case I40E_AQ_CAP_ID_NVM_MGMT:
+ if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+ p->sec_rev_disabled = true;
+ if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+ p->update_disabled = true;
+ break;
default:
break;
}
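[Editor's note] A hedged usage sketch for the two per-VLAN promiscuous helpers added in this file; the wrapper name is hypothetical, the driver's headers are assumed, and error handling is reduced to returning the first failure:

static enum i40e_status_code set_vlan_promisc(struct i40e_hw *hw,
					      u16 seid, u16 vid)
{
	enum i40e_status_code status;

	/* unicast first, then multicast, both scoped to this VLAN */
	status = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, true,
						    vid, NULL);
	if (status)
		return status;

	return i40e_aq_set_vsi_mc_promisc_on_vlan(hw, seid, true,
						  vid, NULL);
}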
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 0c97733d2..e6af8c8d7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -147,9 +147,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
(unsigned long int)nd->vlan_features);
}
- if (vsi->active_vlans)
- dev_info(&pf->pdev->dev,
- " vlgrp: & = %p\n", vsi->active_vlans);
+ dev_info(&pf->pdev->dev,
+ " vlgrp: & = %p\n", vsi->active_vlans);
dev_info(&pf->pdev->dev,
" state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->state, vsi->flags,
@@ -269,13 +268,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->queue_index,
rx_ring->reg_idx);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
- i, rx_ring->rx_hdr_len,
- rx_ring->rx_buf_len,
- rx_ring->dtype);
+ " rx_rings[%i]: rx_buf_len = %d\n",
+ i, rx_ring->rx_buf_len);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, ring_is_ps_enabled(rx_ring),
+ " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i,
rx_ring->next_to_use,
rx_ring->next_to_clean,
rx_ring->ring_active);
@@ -327,9 +324,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->queue_index,
tx_ring->reg_idx);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, tx_ring->dtype);
- dev_info(&pf->pdev->dev,
" tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
i,
tx_ring->next_to_use,
@@ -366,8 +360,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" work_limit = %d\n",
vsi->work_limit);
dev_info(&pf->pdev->dev,
- " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
- vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+ " max_frame = %d, rx_buf_len = %d dtype = %d\n",
+ vsi->max_frame, vsi->rx_buf_len, 0);
dev_info(&pf->pdev->dev,
" num_q_vectors = %i, base_vector = %i\n",
vsi->num_q_vectors, vsi->base_vector);
@@ -592,13 +586,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
" d[%03x] = 0x%016llx 0x%016llx\n",
i, txd->buffer_addr,
txd->cmd_type_offset_bsz);
- } else if (sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(ring, i);
- dev_info(&pf->pdev->dev,
- " d[%03x] = 0x%016llx 0x%016llx\n",
- i, rxd->read.pkt_addr,
- rxd->read.hdr_addr);
} else {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
@@ -620,13 +607,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
"vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
txd->buffer_addr, txd->cmd_type_offset_bsz);
- } else if (sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(ring, desc_n);
- dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
- vsi_seid, ring_id, desc_n,
- rxd->read.pkt_addr, rxd->read.hdr_addr);
} else {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 99257fcd1..d701861c6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -44,6 +44,8 @@
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 784b16594..5e8d84ff7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -230,12 +230,22 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = {
+ "MFP",
+ "LinkPolling",
+ "flow-director-atr",
+ "veb-stats",
+ "hw-atr-eviction",
+ "vf-true-promisc-support",
+};
+
+#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl)
+
static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"NPAR",
"LinkPolling",
"flow-director-atr",
"veb-stats",
- "packet-split",
"hw-atr-eviction",
};
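[Editor's note] ethtool reports private flags as a u32 bitmap whose bit n pairs with string n, so the table order above has to track the I40E_PRIV_FLAGS_* BIT() values. A small userspace sketch of that pairing:

#include <stdint.h>
#include <stdio.h>

#define BIT(nr) (1U << (nr))

static const char * const flags_gl[] = {
	"MFP", "LinkPolling", "flow-director-atr",
	"veb-stats", "hw-atr-eviction", "vf-true-promisc-support",
};

static void print_set_flags(uint32_t flags)
{
	for (unsigned int i = 0;
	     i < sizeof(flags_gl) / sizeof(flags_gl[0]); i++)
		if (flags & BIT(i))
			printf("%s\n", flags_gl[i]);
}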
@@ -252,6 +262,110 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
}
/**
+ * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @phy_types: PHY types to convert
+ * @supported: pointer to the ethtool supported variable to fill in
+ * @advertising: pointer to the ethtool advertising variable to fill in
+ *
+ **/
+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
+ u32 *advertising)
+{
+ enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types;
+
+ *supported = 0x0;
+ *advertising = 0x0;
+
+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+ *supported |= SUPPORTED_100baseT_Full;
+ *advertising |= ADVERTISED_100baseT_Full;
+ }
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XFI ||
+ phy_types & I40E_CAP_PHY_TYPE_SFI ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+ *supported |= SUPPORTED_10000baseT_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_10000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+ *supported |= SUPPORTED_40000baseCR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_40000baseCR4_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_40000baseCR4_Full;
+ }
+ if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
+ !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_100baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_100baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg |
+ ADVERTISED_1000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+ *supported |= SUPPORTED_40000baseSR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+ *supported |= SUPPORTED_40000baseLR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+ *supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+ *supported |= SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+ *supported |= SUPPORTED_10000baseKR_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_10000baseKR_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+ *supported |= SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+ *supported |= SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
+ }
+}
+
+/**
* i40e_get_settings_link_up - Get the Link settings for when link is up
* @hw: hw structure
* @ecmd: ethtool command to fill in
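[Editor's note] On link-up the new helper is consumed by intersecting the PHY-type-derived modes with what the NVM allows, as a later hunk in this file shows. A hedged wrapper over that step; the function name is hypothetical and the driver's headers are assumed:

static void i40e_mask_modes_by_nvm(struct i40e_pf *pf,
				   struct ethtool_cmd *ecmd)
{
	u32 e_supported = 0, e_advertising = 0;

	/* everything the NVM-listed PHY types could ever provide */
	i40e_phy_type_to_ethtool(pf, &e_supported, &e_advertising);

	/* keep only the intersection with the link-derived modes */
	ecmd->supported &= e_supported;
	ecmd->advertising &= e_advertising;
}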
@@ -265,6 +379,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
{
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
u32 link_speed = hw_link_info->link_speed;
+ u32 e_advertising = 0x0;
+ u32 e_supported = 0x0;
/* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
@@ -305,14 +421,18 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
ecmd->supported = SUPPORTED_Autoneg |
@@ -320,12 +440,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ecmd->advertising = ADVERTISED_Autoneg |
ADVERTISED_1000baseT_Full;
break;
- case I40E_PHY_TYPE_100BASE_TX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
ecmd->supported = SUPPORTED_Autoneg |
@@ -352,14 +466,23 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ecmd->advertising |= ADVERTISED_100baseT_Full;
}
break;
- /* Backplane is set based on supported phy types in get_settings
- * so don't set anything here but don't warn either
- */
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_1000BASE_KX:
+ ecmd->supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_10000baseKR_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
break;
default:
/* if we got here and link is up something bad is afoot */
@@ -367,6 +490,16 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
hw_link_info->phy_type);
}
+ /* Now that we've worked out everything that could be supported by the
+	 * current PHY type, get what is supported by the NVM and intersect
+	 * the two to get what is truly supported
+ */
+ i40e_phy_type_to_ethtool(pf, &e_supported,
+ &e_advertising);
+
+ ecmd->supported = ecmd->supported & e_supported;
+ ecmd->advertising = ecmd->advertising & e_advertising;
+
/* Set speed and duplex */
switch (link_speed) {
case I40E_LINK_SPEED_40GB:
@@ -401,74 +534,11 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
struct ethtool_cmd *ecmd,
struct i40e_pf *pf)
{
- enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
-
/* link is down and the driver needs to fall back on
* supported phy types to figure out what info to display
*/
- ecmd->supported = 0x0;
- ecmd->advertising = 0x0;
- if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
- if (pf->hw.mac.type == I40E_MAC_X722) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- }
- }
- }
- if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
- phy_types & I40E_CAP_PHY_TYPE_XFI ||
- phy_types & I40E_CAP_PHY_TYPE_SFI ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
- ecmd->supported |= SUPPORTED_10000baseT_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
- phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
- phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
- ecmd->supported |= SUPPORTED_40000baseCR4_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
- phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_40000baseCR4_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_40000baseCR4_Full;
- }
- if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
- !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_100baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
- ecmd->supported |= SUPPORTED_40000baseSR4_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
- ecmd->supported |= SUPPORTED_40000baseLR4_Full;
+ i40e_phy_type_to_ethtool(pf, &ecmd->supported,
+ &ecmd->advertising);
/* With no link speed and duplex are unknown */
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
@@ -497,38 +567,6 @@ static int i40e_get_settings(struct net_device *netdev,
i40e_get_settings_link_down(hw, ecmd, pf);
/* Now set the settings that don't rely on link being up/down */
-
- /* For backplane, supported and advertised are only reliant on the
- * phy types the NVM specifies are supported.
- */
- if (hw->device_id == I40E_DEV_ID_KX_B ||
- hw->device_id == I40E_DEV_ID_KX_C ||
- hw->device_id == I40E_DEV_ID_20G_KR2 ||
- hw->device_id == I40E_DEV_ID_20G_KR2_A) {
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
- ecmd->supported |= SUPPORTED_40000baseKR4_Full;
- ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
- ecmd->supported |= SUPPORTED_20000baseKR2_Full;
- ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
- ecmd->supported |= SUPPORTED_10000baseKR_Full;
- ecmd->advertising |= ADVERTISED_10000baseKR_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
- ecmd->supported |= SUPPORTED_10000baseKX4_Full;
- ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
- ecmd->supported |= SUPPORTED_1000baseKX_Full;
- ecmd->advertising |= ADVERTISED_1000baseKX_Full;
- }
- }
-
/* Set autoneg settings */
ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -1143,6 +1181,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+ if (pf->hw.pf_id == 0)
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN;
+ else
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
}
static void i40e_get_ringparam(struct net_device *netdev,
@@ -1259,6 +1301,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
+ /* this is to allow wr32 to have something to write to
+ * during early allocation of Rx buffers
+ */
+ u32 __iomem faketail = 0;
+ struct i40e_ring *ring;
+ u16 unused;
+
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
@@ -1267,12 +1316,22 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
rx_rings[i].desc = NULL;
rx_rings[i].rx_bi = NULL;
+ rx_rings[i].tail = (u8 __iomem *)&faketail;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
+ if (err)
+ goto rx_unwind;
+
+ /* now allocate the Rx buffers to make sure the OS
+		 * has enough memory; any failure here means we abort
+ */
+ ring = &rx_rings[i];
+ unused = I40E_DESC_UNUSED(ring);
+ err = i40e_alloc_rx_buffers(ring, unused);
+rx_unwind:
if (err) {
- while (i) {
- i--;
+ do {
i40e_free_rx_resources(&rx_rings[i]);
- }
+ } while (i--);
kfree(rx_rings);
rx_rings = NULL;
@@ -1298,6 +1357,17 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_free_rx_resources(vsi->rx_rings[i]);
+ /* get the real tail offset */
+ rx_rings[i].tail = vsi->rx_rings[i]->tail;
+ /* this is to fake out the allocation routine
+ * into thinking it has to realloc everything
+ * but the recycling logic will let us re-use
+ * the buffers allocated above
+ */
+ rx_rings[i].next_to_use = 0;
+ rx_rings[i].next_to_clean = 0;
+ rx_rings[i].next_to_alloc = 0;
+ /* do a struct copy */
*vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
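[Editor's note] Two things happen in the hunk above: the cloned rings get a dummy tail so early buffer allocation has somewhere to write, and the failure path switches to a do/while so the partially set-up current ring is freed too. A self-contained illustration of the unwind change, with stand-in setup/free functions:

#include <stdlib.h>

struct res {
	void *buf;
};

static int setup_one(struct res *r)
{
	r->buf = malloc(64);
	return r->buf ? 0 : -1;
}

/* must tolerate a partially set-up entry, as i40e_free_rx_resources does */
static void free_one(struct res *r)
{
	free(r->buf);
	r->buf = NULL;
}

static int setup_all(struct res *arr, int n)
{
	int i, err = 0;

	for (i = 0; i < n; i++) {
		err = setup_one(&arr[i]);
		if (err)
			break;
	}
	if (!err)
		return 0;

	/* free entry i and everything before it; the old
	 * `while (i) { i--; ... }` form skipped the current entry
	 */
	do {
		free_one(&arr[i]);
	} while (i--);

	return err;
}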
@@ -1342,7 +1412,10 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
return I40E_VSI_STATS_LEN(netdev);
}
case ETH_SS_PRIV_FLAGS:
- return I40E_PRIV_FLAGS_STR_LEN;
+ if (pf->hw.pf_id == 0)
+ return I40E_PRIV_FLAGS_GL_STR_LEN;
+ else
+ return I40E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1540,10 +1613,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- memcpy(data, i40e_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
+ if (pf->hw.pf_id == 0) {
+ for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) {
+ memcpy(data, i40e_priv_flags_strings_gl[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ } else {
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ memcpy(data, i40e_priv_flags_strings[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
}
break;
default:
@@ -1714,7 +1795,7 @@ static void i40e_diag_test(struct net_device *netdev,
/* If the device is online then take it offline */
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ i40e_close(netdev);
else
/* This reset does not affect link - if it is
* changed to a type of reset that does affect
@@ -1743,7 +1824,7 @@ static void i40e_diag_test(struct net_device *netdev,
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
if (if_running)
- dev_open(netdev);
+ i40e_open(netdev);
} else {
/* Online tests */
netif_info(pf, drv, netdev, "online testing starting\n");
@@ -1837,7 +1918,7 @@ static int i40e_set_phys_id(struct net_device *netdev,
if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
pf->led_status = i40e_led_get(hw);
} else {
- i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL);
+ i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
ret = i40e_led_get_phy(hw, &temp_status,
&pf->phy_led_val);
pf->led_status = temp_status;
@@ -2490,7 +2571,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (!vsi)
return -EINVAL;
-
pf = vsi->back;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
@@ -2548,15 +2628,18 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
if (ntohl(fsp->m_ext.data[1])) {
- if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
- netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+ vf_id = ntohl(fsp->h_ext.data[1]);
+ if (vf_id >= pf->num_alloc_vfs) {
+ netif_info(pf, drv, vsi->netdev,
+ "Invalid VF id %d\n", vf_id);
goto free_input;
}
- vf_id = ntohl(fsp->h_ext.data[1]);
/* Find vsi id from vf id and override dest vsi */
input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
- netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+ netif_info(pf, drv, vsi->netdev,
+ "Invalid queue id %d for VF %d\n",
+ input->q_index, vf_id);
goto free_input;
}
}
@@ -2803,18 +2886,18 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
struct i40e_pf *pf = vsi->back;
u32 ret_flags = 0;
- ret_flags |= pf->hw.func_caps.npar_enable ?
- I40E_PRIV_FLAGS_NPAR_FLAG : 0;
ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
I40E_PRIV_FLAGS_FD_ATR : 0;
ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
I40E_PRIV_FLAGS_VEB_STATS : 0;
- ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ?
- I40E_PRIV_FLAGS_PS : 0;
ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
+ if (pf->hw.pf_id == 0) {
+ ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
+ I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
+ }
return ret_flags;
}
@@ -2829,27 +2912,13 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ u16 sw_flags = 0, valid_flags = 0;
bool reset_required = false;
+ bool promisc_change = false;
+ int ret;
/* NOTE: MFP is not settable */
- /* allow the user to control the method of receive
- * buffer DMA, whether the packet is split at header
- * boundaries into two separate buffers. In some cases
- * one routine or the other will perform better.
- */
- if ((flags & I40E_PRIV_FLAGS_PS) &&
- !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
- pf->flags |= I40E_FLAG_RX_PS_ENABLED;
- pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED;
- reset_required = true;
- } else if (!(flags & I40E_PRIV_FLAGS_PS) &&
- (pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
- pf->flags &= ~I40E_FLAG_RX_PS_ENABLED;
- pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
- reset_required = true;
- }
-
if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
else
@@ -2876,6 +2945,33 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
reset_required = true;
}
+ if (pf->hw.pf_id == 0) {
+ if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
+ promisc_change = true;
+ } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+ (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
+ promisc_change = true;
+ }
+ }
+ if (promisc_change) {
+ if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
+ NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+ "couldn't set switch config bits, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+ }
+ }
+
if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
(pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
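The switch-config call above uses a value/mask pair: valid_flags marks which bits firmware may touch, sw_flags carries their new values, so enabling true promiscuous mode sends a cleared-but-valid bit rather than no bit at all. A small model of that pairing (macro value and names hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define SWITCH_CFG_PROMISC 0x0001	/* stand-in for the AQ bit */

static void build_promisc_cfg(bool true_promisc,
			      uint16_t *flags, uint16_t *valid)
{
	*valid = SWITCH_CFG_PROMISC;	/* firmware must look at the bit */
	*flags = true_promisc ? 0 : SWITCH_CFG_PROMISC;
	/* clear-but-valid makes firmware actively drop limited promisc
	 * instead of ignoring the field
	 */
}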
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 8ad162c16..58e6c1570 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,16 +38,6 @@
#include "i40e_fcoe.h"
/**
- * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
- * @ptype: the packet type field from rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
- return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
- (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
-/**
* i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
* @sof: the FCoE start of frame delimiter
**/
@@ -1371,7 +1361,7 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 5ebe12d56..a7c7b1d9b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -49,7 +49,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
- i40e_status ret_code;
+ i40e_status ret_code = I40E_SUCCESS;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 344912957..501f15d9f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -45,8 +45,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 16
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -90,6 +90,8 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_I_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
/* required last entry */
@@ -326,7 +328,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
unsigned long trans_start;
q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start ? : netdev->trans_start;
+ trans_start = q->trans_start;
if (netif_xmit_stopped(q) &&
time_after(jiffies,
(trans_start + netdev->watchdog_timeo))) {
@@ -396,24 +398,6 @@ static void i40e_tx_timeout(struct net_device *netdev)
}
/**
- * i40e_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
- **/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
-{
- rx_ring->next_to_use = val;
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(val, rx_ring->tail);
-}
-
-/**
* i40e_get_vsi_stats_struct - Get System Network Statistics
* @vsi: the VSI we care about
*
@@ -1360,6 +1344,13 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (!vsi || !macaddr)
return NULL;
+	/* Do not allow a broadcast filter to be added here: a broadcast
+	 * filter is already added as part of VSI creation for every
+	 * newly created VSI except the FDIR VSI
+	 */
+ if (is_broadcast_ether_addr(macaddr))
+ return NULL;
+
f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
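Broadcast filters are now installed once at VSI creation (see the i40e_add_vsi hunk further down), so the filter-add path rejects broadcast MACs outright. is_broadcast_ether_addr() boils down to an AND across the six octets; a userspace equivalent:

#include <stdbool.h>
#include <stdint.h>

static bool mac_is_broadcast(const uint8_t addr[6])
{
	/* result is 0xff only if every octet is 0xff */
	return (addr[0] & addr[1] & addr[2] &
		addr[3] & addr[4] & addr[5]) == 0xff;
}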
@@ -2097,6 +2088,12 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
}
+	/* if the VF is not trusted, do not do promisc */
+ if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ goto out;
+ }
+
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
@@ -2138,7 +2135,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
&vsi->back->hw,
vsi->seid,
- cur_promisc, NULL);
+ cur_promisc, NULL,
+ true);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
@@ -2160,18 +2158,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
aq_ret, pf->hw.aq.asq_last_status);
}
}
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
- }
}
out:
/* if something went wrong then set the changed flag so we try again */
@@ -2865,34 +2851,21 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
memset(&rx_ctx, 0, sizeof(rx_ctx));
ring->rx_buf_len = vsi->rx_buf_len;
- ring->rx_hdr_len = vsi->rx_hdr_len;
rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
- rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
- if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
- set_ring_16byte_desc_enabled(ring);
- rx_ctx.dsize = 0;
- } else {
- rx_ctx.dsize = 1;
- }
+ /* use 32 byte descriptors */
+ rx_ctx.dsize = 1;
- rx_ctx.dtype = vsi->dtype;
- if (vsi->dtype) {
- set_ring_ps_enabled(ring);
- rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
- I40E_RX_SPLIT_IP |
- I40E_RX_SPLIT_TCP_UDP |
- I40E_RX_SPLIT_SCTP;
- } else {
- rx_ctx.hsplit_0 = 0;
- }
+ /* descriptor type is always zero
+ * rx_ctx.dtype = 0;
+ */
+ rx_ctx.hsplit_0 = 0;
- rx_ctx.rxmax = min_t(u16, vsi->max_frame,
- (chain_len * ring->rx_buf_len));
+ rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
if (hw->revision_id == 0)
rx_ctx.lrxqthresh = 0;
else
@@ -2929,12 +2902,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring_is_ps_enabled(ring)) {
- i40e_alloc_rx_headers(ring);
- i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
- } else {
- i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
- }
+ i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
return 0;
}
@@ -2973,40 +2941,18 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
else
vsi->max_frame = I40E_RXBUFFER_2048;
- /* figure out correct receive buffer length */
- switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
- I40E_FLAG_RX_PS_ENABLED)) {
- case I40E_FLAG_RX_1BUF_ENABLED:
- vsi->rx_hdr_len = 0;
- vsi->rx_buf_len = vsi->max_frame;
- vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
- break;
- case I40E_FLAG_RX_PS_ENABLED:
- vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
- vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
- break;
- default:
- vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
- vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
- break;
- }
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
#ifdef I40E_FCOE
/* setup rx buffer for FCoE */
if ((vsi->type == I40E_VSI_FCOE) &&
(vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
- vsi->rx_hdr_len = 0;
vsi->rx_buf_len = I40E_RXBUFFER_3072;
vsi->max_frame = I40E_RXBUFFER_3072;
- vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
}
#endif /* I40E_FCOE */
/* round up for the chip's needs */
- vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
- BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
@@ -4164,7 +4110,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
int i;
i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
synchronize_irq(pf->msix_entries[0].vector);
free_irq(pf->msix_entries[0].vector, pf);
}
@@ -5509,11 +5455,7 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
*
* Returns 0, this is not allowed to fail
**/
-#ifdef I40E_FCOE
int i40e_close(struct net_device *netdev)
-#else
-static int i40e_close(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5538,8 +5480,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
WARN_ON(in_interrupt());
- if (i40e_check_asq_alive(&pf->hw))
- i40e_vc_notify_reset(pf);
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -6377,7 +6317,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
break;
default:
dev_info(&pf->pdev->dev,
- "ARQ Error: Unknown event 0x%04x received\n",
+ "ARQ: Unknown event 0x%04x ignored\n",
opcode);
break;
}
@@ -6742,6 +6682,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
return;
+ if (i40e_check_asq_alive(&pf->hw))
+ i40e_vc_notify_reset(pf);
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
@@ -6862,6 +6804,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
*/
ret = i40e_aq_set_phy_int_mask(&pf->hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -7525,10 +7468,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
rx_ring->count = vsi->num_desc;
rx_ring->size = 0;
rx_ring->dcb_tc = 0;
- if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
- set_ring_16byte_desc_enabled(rx_ring);
- else
- clear_ring_16byte_desc_enabled(rx_ring);
rx_ring->rx_itr_setting = pf->rx_itr_default;
vsi->rx_rings[i] = rx_ring;
}
@@ -7782,10 +7721,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
+ * @cpu: cpu to be used on affinity_mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
struct i40e_q_vector *q_vector;
@@ -7796,7 +7736,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
i40e_napi_poll, NAPI_POLL_WEIGHT);
@@ -7820,8 +7761,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int v_idx, num_q_vectors;
- int err;
+ int err, v_idx, num_q_vectors, current_cpu;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -7831,10 +7771,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
+ current_cpu = cpumask_first(cpu_online_mask);
+
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
if (err)
goto err_out;
+ current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+ if (unlikely(current_cpu >= nr_cpu_ids))
+ current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
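Pinning vector v_idx to CPU v_idx breaks when the online CPU ids are sparse; the loop above instead walks cpu_online_mask and wraps. A stand-alone model with a sparse online set (a plain array stands in for the cpumask helpers):

#include <stdio.h>

int main(void)
{
	int online[] = { 0, 2, 5, 6 };	/* sparse online CPU ids */
	int nonline = 4, nvecs = 6, cur = 0, v;

	for (v = 0; v < nvecs; v++) {
		printf("vector %d -> cpu %d\n", v, online[cur]);
		cur++;			/* cpumask_next() */
		if (cur >= nonline)	/* the nr_cpu_ids check */
			cur = 0;	/* wrap to cpumask_first() */
	}
	return 0;
}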
@@ -8084,24 +8029,45 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ u16 vf_id = vsi->vf_id;
u8 i;
/* Fill out hash function seed */
if (seed) {
u32 *seed_dw = (u32 *)seed;
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+ if (vsi->type == I40E_VSI_MAIN) {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
+ seed_dw[i]);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw,
+ I40E_VFQF_HKEY1(i, vf_id),
+ seed_dw[i]);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
+ }
}
if (lut) {
u32 *lut_dw = (u32 *)lut;
- if (lut_size != I40E_HLUT_ARRAY_SIZE)
- return -EINVAL;
-
- for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ if (vsi->type == I40E_VSI_MAIN) {
+ if (lut_size != I40E_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw,
+ I40E_VFQF_HLUT1(i, vf_id),
+ lut_dw[i]);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+ }
}
i40e_flush(hw);
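The seed arrives as a byte array but the key registers take 32-bit words, hence the (u32 *) cast, and the VSI type now selects the PF or per-VF register bank. A hedged sketch of the word-wise write (register I/O stubbed out; 52 bytes assumes thirteen 32-bit key registers, as on this hardware family):

#include <stdint.h>
#include <string.h>

#define SEED_BYTES 52	/* 13 x 32-bit key registers (assumption) */

static void write_reg(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

static void write_rss_seed(const uint8_t seed[SEED_BYTES], uint32_t base)
{
	uint32_t dw;
	int i;

	for (i = 0; i < SEED_BYTES / 4; i++) {
		/* memcpy sidesteps the alignment/aliasing questions the
		 * raw cast in the driver relies on being benign
		 */
		memcpy(&dw, seed + 4 * i, sizeof(dw));
		write_reg(base + i, dw);
	}
}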
@@ -8440,7 +8406,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
- pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
@@ -8451,14 +8416,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
- I40E_FLAG_LINK_POLLING_ENABLED |
I40E_FLAG_MSIX_ENABLED;
- if (iommu_present(&pci_bus_type))
- pf->flags |= I40E_FLAG_RX_PS_ENABLED;
- else
- pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
-
/* Set default ITR */
pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
@@ -9074,6 +9033,7 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
+ .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
#if IS_ENABLED(CONFIG_VXLAN)
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
@@ -9114,40 +9074,44 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ netdev->hw_enc_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
0;
- netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CRC |
- NETIF_F_HIGHDMA |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_GRE |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
- NETIF_F_RXCSUM |
- NETIF_F_RXHASH |
- 0;
+ if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= netdev->hw_enc_features |
+ NETIF_F_TSO_MANGLEID;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- netdev->features |= NETIF_F_NTUPLE;
- if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
+ netdev->hw_features |= netdev->hw_enc_features |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
@@ -9163,6 +9127,12 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
I40E_VLAN_ANY, false, true);
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
+ } else if ((pf->hw.aq.api_maj_ver > 1) ||
+ ((pf->hw.aq.api_maj_ver == 1) &&
+ (pf->hw.aq.api_min_ver > 4))) {
+ /* Supported in FW API version higher than 1.4 */
+ pf->flags |= I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
+ pf->auto_disable_flags = I40E_FLAG_HW_ATR_EVICT_CAPABLE;
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -9180,12 +9150,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
- /* vlan gets same features (except vlan offload)
- * after any tweaks for specific VSI types
- */
- netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
/* Setup netdev TC information */
@@ -9260,6 +9225,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
+ i40e_status aq_ret = 0;
u8 laa_macaddr[ETH_ALEN];
bool found_laa_mac_filter = false;
struct i40e_pf *pf = vsi->back;
@@ -9398,7 +9364,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |=
- I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
+ I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
}
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
@@ -9448,6 +9415,18 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
}
+	/* Except for the FDIR VSI, set the broadcast filter on all other VSIs */
+ if (vsi->type != I40E_VSI_FDIR) {
+ aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+ if (aq_ret) {
+ ret = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */
@@ -10444,6 +10423,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
**/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u16 flags = 0;
int ret;
/* find out what's out there already */
@@ -10457,6 +10437,32 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
}
i40e_pf_reset_stats(pf);
+ /* set the switch config bit for the whole device to
+ * support limited promisc or true promisc
+ * when user requests promisc. The default is limited
+ * promisc.
+ */
+
+ if ((pf->hw.pf_id == 0) &&
+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+
+ if (pf->hw.pf_id == 0) {
+ u16 valid_flags;
+
+ valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
+ NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+ "couldn't set switch config bits, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+ }
+ }
+
/* first time setup */
if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
@@ -10684,11 +10690,9 @@ static void i40e_print_features(struct i40e_pf *pf)
#ifdef CONFIG_PCI_IOV
i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
- i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
+ i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs,
- pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
-
+ pf->vsi[pf->lan_vsi]->num_queue_pairs);
if (pf->flags & I40E_FLAG_RSS_ENABLED)
i += snprintf(&buf[i], REMAIN(i), " RSS");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
@@ -10827,6 +10831,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.func = PCI_FUNC(pdev->devfn);
pf->instance = pfs_found;
+ /* set up the locks for the AQ, do this only once in probe
+ * and destroy them only once in remove
+ */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
if (debug != -1) {
pf->msg_enable = pf->hw.debug_mask;
pf->msg_enable = debug;
@@ -10872,12 +10882,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up a default setting for link flow control */
pf->hw.fc.requested_mode = I40E_FC_NONE;
- /* set up the locks for the AQ, do this only once in probe
- * and destroy them only once in remove
- */
- mutex_init(&hw->aq.asq_mutex);
- mutex_init(&hw->aq.arq_mutex);
-
err = i40e_init_adminq(hw);
if (err) {
if (err == I40E_ERR_FIRMWARE_API_VERSION)
@@ -11069,6 +11073,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
err = i40e_aq_set_phy_int_mask(&pf->hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -11270,7 +11275,6 @@ err_init_lan_hmc:
kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
- (void)i40e_shutdown_adminq(hw);
err_pf_reset:
iounmap(hw->hw_addr);
err_ioremap:
@@ -11312,8 +11316,10 @@ static void i40e_remove(struct pci_dev *pdev)
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
- del_timer_sync(&pf->service_timer);
- cancel_work_sync(&pf->service_task);
+ if (pf->service_timer.data)
+ del_timer_sync(&pf->service_timer);
+ if (pf->service_task.func)
+ cancel_work_sync(&pf->service_task);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
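remove() can now run after a probe that bailed out early, so teardown only touches the timer and work item if setup populated them; the .data and .func members double as initialized markers. A sketch of the guard-before-teardown pattern (types simplified from timer_list/work_struct):

#include <stddef.h>

struct timer { void (*fn)(unsigned long); unsigned long data; };
struct work  { void (*func)(struct work *); };

static void stop_timer(struct timer *t) { (void)t; }
static void flush_work(struct work *w)  { (void)w; }

static void teardown(struct timer *t, struct work *w)
{
	if (t->data)		/* nonzero once setup_timer() ran */
		stop_timer(t);
	if (w->func)		/* non-NULL once INIT_WORK() ran */
		flush_work(w);
}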
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 5730f8091..954efe311 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -693,10 +693,10 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
/* early check for status command and debug msgs */
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
i40e_nvm_update_state_str[upd_cmd],
hw->nvmupd_state,
- hw->aq.nvm_release_on_done,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
cmd->command, cmd->config, cmd->offset, cmd->data_size);
if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -710,7 +710,18 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* going into the state machine
*/
if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
bytes[0] = hw->nvmupd_state;
+
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
+
return 0;
}
@@ -729,6 +740,14 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
case I40E_NVMUPD_STATE_INIT_WAIT:
case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ return 0;
+ }
+
status = I40E_ERR_NOT_READY;
*perrno = -EBUSY;
break;
@@ -799,7 +818,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -815,7 +835,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -828,10 +849,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (status)
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
}
break;
@@ -849,7 +872,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
-EIO;
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -940,8 +964,10 @@ retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (!status)
+ if (!status) {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
break;
case I40E_NVMUPD_WRITE_LCB:
@@ -953,7 +979,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -967,6 +994,7 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
@@ -980,7 +1008,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -1030,6 +1059,37 @@ retry:
}
/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+{
+ if (opcode == hw->nvm_wait_opcode) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+/**
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1189,6 +1249,12 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
+	/* should we wait for a follow-up event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+
return status;
}
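Taken together, the NVM hunks arm hw->nvm_wait_opcode when a write or erase is queued and clear it from i40e_nvmupd_check_wait_event() when the matching admin-queue completion arrives. A reduced model of that handshake (state names and fields are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdint.h>

enum upd_state { ST_INIT, ST_INIT_WAIT, ST_WRITING, ST_WRITE_WAIT };

struct nvm_ctx {
	enum upd_state state;
	uint16_t wait_opcode;	/* 0 = not waiting for a completion */
	bool release_on_done;
};

static void check_wait_event(struct nvm_ctx *c, uint16_t opcode)
{
	if (opcode != c->wait_opcode)
		return;			/* unrelated completion, ignore */

	if (c->release_on_done) {
		/* i40e_release_nvm() would drop the semaphore here */
		c->release_on_done = false;
	}
	c->wait_opcode = 0;

	if (c->state == ST_INIT_WAIT)
		c->state = ST_INIT;
	else if (c->state == ST_WRITE_WAIT)
		c->state = ST_WRITING;
}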
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index d51eee5bf..80403c6ee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -130,9 +130,18 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@@ -174,6 +183,10 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
struct i40e_aqc_get_switch_config_resp *buf,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
enum i40e_aq_resource_access_type access,
@@ -228,10 +241,6 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_bw,
struct i40e_asq_cmd_details *cmd_details);
@@ -308,6 +317,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 565ca7c83..ed39cbad2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -158,9 +158,10 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now, then = ns_to_timespec64(delta);
+ struct timespec64 now, then;
unsigned long flags;
+ then = ns_to_timespec64(delta);
spin_lock_irqsave(&pf->tmreg_lock, flags);
i40e_ptp_read(pf, &now);
@@ -288,9 +289,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
pf->last_rx_ptp_check = jiffies;
pf->rx_hwtstamp_cleared++;
- dev_warn(&vsi->back->pdev->dev,
- "%s: clearing Rx timestamp hang\n",
- __func__);
+ WARN_ONCE(1, "Detected Rx timestamp register hang\n");
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6a49b7ae5..a8868e1bf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,19 +636,21 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (e.g. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
- unsigned int total_packets = 0;
- unsigned int total_bytes = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = vsi->work_limit;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -678,7 +680,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
total_packets += tx_buf->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buf->skb);
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -749,7 +751,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -767,7 +769,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ !test_bit(__I40E_DOWN, &vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -1022,7 +1024,6 @@ err:
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
unsigned long bi_size;
u16 i;
@@ -1030,48 +1031,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (ring_is_ps_enabled(rx_ring)) {
- int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
- rx_bi = &rx_ring->rx_bi[0];
- if (rx_bi->hdr_buf) {
- dma_free_coherent(dev,
- bufsz,
- rx_bi->hdr_buf,
- rx_bi->dma);
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = 0;
- rx_bi->hdr_buf = NULL;
- }
- }
- }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->dma) {
- dma_unmap_single(dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
- }
+ struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
if (rx_bi->skb) {
dev_kfree_skb(rx_bi->skb);
rx_bi->skb = NULL;
}
- if (rx_bi->page) {
- if (rx_bi->page_dma) {
- dma_unmap_page(dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page_dma = 0;
- }
- __free_page(rx_bi->page);
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
- }
+ if (!rx_bi->page)
+ continue;
+
+ dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_pages(rx_bi->page, 0);
+
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
}
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -1080,6 +1055,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -1104,37 +1080,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
- * i40e_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
- dma_addr_t dma;
- void *buffer;
- int buf_size;
- int i;
-
- if (rx_ring->rx_bi[0].hdr_buf)
- return;
- /* Make sure the buffers don't cross cache line boundaries. */
- buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
- buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
- &dma, GFP_KERNEL);
- if (!buffer)
- return;
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = dma + (i * buf_size);
- rx_bi->hdr_buf = buffer + (i * buf_size);
- }
-}
-
-/**
* i40e_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -1155,9 +1100,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
- ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
- : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -1168,6 +1111,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
goto err;
}
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -1186,6 +1130,10 @@ err:
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -1196,160 +1144,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
*
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
**/
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
{
- u16 i = rx_ring->next_to_use;
- union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
- const int current_node = numa_node_id();
+ struct page *page = bi->page;
+ dma_addr_t dma;
- /* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
- return false;
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page)) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
+ /* alloc new page for storage */
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
- if (bi->skb) /* desc is in use */
- goto no_buffers;
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- /* If we've been moved to a different NUMA node, release the
- * page so we can get a new one on the current node.
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
*/
- if (bi->page && page_to_nid(bi->page) != current_node) {
- dma_unmap_page(rx_ring->dev,
- bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else if (bi->page) {
- rx_ring->rx_stats.page_reuse_count++;
- }
-
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- 0,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- bi->page_offset = 0;
- goto no_buffers;
- }
- bi->page_offset = 0;
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- i++;
- if (i == rx_ring->count)
- i = 0;
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_pages(page, 0);
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
}
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
- return false;
+ return true;
+}
-no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
- * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * i40e_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns true if any errors on allocation
+ * Returns false if all allocations were successful, true if any fail
**/
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
- u16 i = rx_ring->next_to_use;
+ u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
- struct sk_buff *skb;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
- skb = bi->skb;
-
- if (!skb) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- goto no_buffers;
- }
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- bi->skb = skb;
- }
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ bi = &rx_ring->rx_bi[ntu];
- if (!bi->dma) {
- bi->dma = dma_map_single(rx_ring->dev,
- skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- bi->dma = 0;
- dev_kfree_skb(bi->skb);
- bi->skb = NULL;
- goto no_buffers;
- }
- }
+ do {
+ if (!i40e_alloc_mapped_page(rx_ring, bi))
+ goto no_buffers;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc->read.hdr_addr = 0;
- i++;
- if (i == rx_ring->count)
- i = 0;
- }
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ rx_desc++;
+ bi++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_bi;
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
return false;
no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
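i40e_alloc_mapped_page() above only hits the allocator when recycling left bi->page empty, and the new refill loop advances descriptor, buffer and index in lock step, wrapping all three together and zeroing the status of whatever becomes next_to_use. A stand-alone sketch of that walk (structures and COUNT are simplified stand-ins):

#include <stdint.h>

#define COUNT 512	/* ring size, illustrative */

struct desc { uint64_t pkt_addr; uint64_t status; };
struct buf  { uint64_t dma; uint32_t offset; };

static uint16_t refill(struct desc *ring, struct buf *bufs,
		       uint16_t ntu, uint16_t n)
{
	struct desc *d = &ring[ntu];
	struct buf *b = &bufs[ntu];

	while (n--) {
		d->pkt_addr = b->dma + b->offset;
		d++; b++; ntu++;
		if (ntu == COUNT) {	/* wrap all three in lock step */
			d = ring;
			b = bufs;
			ntu = 0;
		}
		/* zero what is now next_to_use so a stale DD bit can't
		 * look like a completed descriptor
		 */
		d->status = 0;
	}
	return ntu;	/* caller writes this to the tail register */
}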
@@ -1358,41 +1268,35 @@ no_buffers:
}
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if (vlan_tag & VLAN_VID_MASK)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
- u32 rx_status,
- u32 rx_error,
- u16 rx_ptype)
+ union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
- bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+ struct i40e_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+ u8 ptype;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
/* Rx csum enabled and ip headers found? */
if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
@@ -1432,20 +1336,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
-
- ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+		/* fall through */
+ default:
+ break;
+ }
return;
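CHECKSUM_UNNECESSARY plus skb->csum_level tells the stack how many nested checksums were verified, so tunneled frames bump the level instead of the old all-or-nothing report. A small model of the decision (enums mirror, but are not, the driver's ptype decode):

enum inner_prot { PROT_NONE, PROT_TCP, PROT_UDP, PROT_SCTP };

struct csum_result {
	int unnecessary;	/* CHECKSUM_UNNECESSARY */
	int csum_level;		/* nested checksums verified */
};

static struct csum_result report_csum(int tunneled, enum inner_prot p)
{
	struct csum_result r = { 0, 0 };

	if (tunneled)
		r.csum_level = 1;	/* inner csum sits under an outer header */

	switch (p) {
	case PROT_TCP:
	case PROT_UDP:
	case PROT_SCTP:
		r.unnecessary = 1;
		break;
	default:
		break;		/* leave the frame as CHECKSUM_NONE */
	}
	return r;
}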
@@ -1459,7 +1366,7 @@ checksum_fail:
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -1487,11 +1394,11 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
u8 rx_ptype)
{
u32 hash;
- const __le64 rss_mask =
+ const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
@@ -1501,346 +1408,436 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
}
/**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
*
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
**/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- u16 i = rx_ring->next_to_clean;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u32 copysize;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
- if (budget <= 0)
- return 0;
+ if (unlikely(rsyn)) {
+ i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
- do {
- struct i40e_rx_buffer *rx_bi;
- struct sk_buff *skb;
- u16 vlan_tag;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- i40e_alloc_rx_buffers_ps(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
- */
- dma_rmb();
- /* sync header buffer for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- if (i40e_rx_is_programming_status(qword)) {
- i40e_clean_programming_status(rx_ring, rx_desc);
- I40E_RX_INCREMENT(rx_ring, i);
- continue;
- }
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- if (likely(!skb)) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_hdr_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- failure = true;
- break;
- }
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+}
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- }
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
- I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- /* sync half-page for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->page_dma,
- rx_bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
- rx_bi->skb = NULL;
- cleaned_count++;
- copysize = 0;
- if (rx_hbo || rx_sph) {
- int len;
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- if (rx_hbo)
- len = I40E_RX_HDR_SIZE;
- else
- len = rx_header_len;
- memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
- } else if (skb->len == 0) {
- int len;
- unsigned char *va = page_address(rx_bi->page) +
- rx_bi->page_offset;
-
- len = min(rx_packet_len, rx_ring->rx_hdr_len);
- memcpy(__skb_put(skb, len), va, len);
- copysize = len;
- rx_packet_len -= len;
- }
- /* Get the rest of the data if this was a header split */
- if (rx_packet_len) {
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rx_bi->page,
- rx_bi->page_offset + copysize,
- rx_packet_len, I40E_RXBUFFER_2048);
-
- /* If the page count is more than 2, then both halves
- * of the page are used and we need to free it. Do it
- * here instead of in the alloc code. Otherwise one
- * of the half-pages might be released between now and
- * then, and we wouldn't know which one to use.
- * Don't call get_page and free_page since those are
- * both expensive atomic operations that just change
- * the refcount in opposite directions. Just give the
- * page to the stack; he can have our refcount.
- */
- if (page_count(rx_bi->page) > 2) {
- dma_unmap_page(rx_ring->dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page = NULL;
- rx_bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else {
- get_page(rx_bi->page);
- /* switch to the other half-page here; the
- * allocation code programs the right addr
- * into HW. If we haven't used this half-page,
- * the address won't be changed, and HW can
- * just use it next time through.
- */
- rx_bi->page_offset ^= PAGE_SIZE / 2;
- }
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes, so that skb_pad can pad frames whose skb->len is less
+ * than 60.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- }
- I40E_RX_INCREMENT(rx_ring, i);
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- struct i40e_rx_buffer *next_buffer;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
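/* Illustrative sketch (not part of this patch): the pull-tail pattern
 * above copies a small header from the first page fragment into the
 * linear area, rounding the copy up to sizeof(long) because a slightly
 * longer aligned memcpy within the same fragment is cheaper than a
 * byte-exact one. All names below are hypothetical userspace stand-ins.
 */
#include <assert.h>
#include <string.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned char frag[256] = { 0 };	/* stand-in for the page frag */
	unsigned char linear[64];		/* stand-in for skb->data */
	size_t pull_len = 14;			/* e.g. an Ethernet header */

	/* over-copy up to sizeof(long) - 1 bytes; they stay in-bounds */
	memcpy(linear, frag, ALIGN_UP(pull_len, sizeof(long)));
	assert(ALIGN_UP(pull_len, sizeof(long)) <= sizeof(linear));
	return 0;
}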
- next_buffer = &rx_ring->rx_bi[i];
- next_buffer->skb = skb;
- rx_ring->rx_stats.non_eop_descs++;
- continue;
- }
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * This function addresses the case where we are pulling data in on pages
+ * only, in which case no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ i40e_pull_tail(rx_ring, skb);
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ return false;
+}
- if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
- i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
- I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
- rx_ring->last_rx_timestamp = jiffies;
- }
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
+ new_buff = &rx_ring->rx_bi[nta];
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* transfer page from old buffer to new buffer */
+ *new_buff = *old_buff;
+}
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
-#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
- dev_kfree_skb_any(skb);
- continue;
- }
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
- i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
+ /* will the data fit in the skb we allocated? if so, just
+ * copy it as it is pretty small anyway
+ */
+ if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
- } while (likely(total_rx_packets < budget));
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_packets += total_rx_packets;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!i40e_page_is_reserved(page)))
+ return true;
- return failure ? budget : total_rx_packets;
+ /* this page cannot be reused so discard it */
+ __free_pages(page, 0);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(i40e_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Even if we own the page, we are not allowed to use atomic_set().
+ * This would break get_page_unless_zero() users.
+ */
+ get_page(rx_buffer->page);
+
+ return true;
}
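/* Illustrative sketch (not part of this patch): with 4K pages the Rx
 * buffer above is half a page, and reuse works by XOR-flipping the
 * offset so the hardware and the stack always hold opposite 2K halves;
 * a page_count() other than 1 means the stack still owns the other
 * half, so the page cannot be flipped. Hypothetical userspace model.
 */
#include <assert.h>

#define HALF_PAGE 2048

int main(void)
{
	unsigned int page_offset = 0;
	int page_refs = 1;		/* only the ring holds the page */

	if (page_refs == 1) {
		page_offset ^= HALF_PAGE;	/* give HW the other half */
		page_refs++;			/* stack takes a reference */
	}
	assert(page_offset == HALF_PAGE && page_refs == 2);
	return 0;
}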
/**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
*
- * Returns number of packets cleaned
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ */
+static inline
+struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ struct i40e_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ skb = rx_buffer->skb;
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ return NULL;
+ }
+
+ /* we will be copying the header into skb->data in
+ * pskb_may_pull, so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ } else {
+ rx_buffer->skb = NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+#define staterrlen rx_desc->wb.qword1.status_error_len
+ if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
+ i40e_clean_programming_status(rx_ring, rx_desc);
+ rx_ring->rx_bi[ntc].skb = skb;
+ return true;
+ }
+ /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+ if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+ return false;
+
+ /* place skb in next buffer to be received */
+ rx_ring->rx_bi[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
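/* Illustrative sketch (not part of this patch): next_to_clean above
 * wraps with a compare instead of a modulo, which stays cheap even when
 * the ring size is not a power of two. Hypothetical helper.
 */
#include <assert.h>

static unsigned int ring_advance(unsigned int ntc, unsigned int count)
{
	ntc++;
	return (ntc < count) ? ntc : 0;
}

int main(void)
{
	assert(ring_advance(510, 512) == 511);
	assert(ring_advance(511, 512) == 0);	/* wrap to the start */
	return 0;
}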
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
**/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- u16 rx_packet_len;
bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u16 i;
- do {
- struct i40e_rx_buffer *rx_bi;
+ while (likely(total_rx_packets < budget)) {
+ union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
+ u32 rx_status;
u16 vlan_tag;
+ u8 rx_ptype;
+ u64 qword;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
- i40e_alloc_rx_buffers_1buf(rx_ring,
- cleaned_count);
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
+ rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
+ /* status_error_len will always be zero for unused descriptors
+ * because it's cleared in cleanup and overlaps with hdr_addr,
+ * which is always zero because packet split isn't used; if the
+ * hardware wrote DD then it will be non-zero
+ */
+ if (!rx_desc->wb.qword1.status_error_len)
+ break;
+
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* DD bit is set.
*/
dma_rmb();
- if (i40e_rx_is_programming_status(qword)) {
- i40e_clean_programming_status(rx_ring, rx_desc);
- I40E_RX_INCREMENT(rx_ring, i);
- continue;
- }
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- prefetch(skb->data);
-
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
+ if (!skb)
+ break;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_bi->skb = NULL;
cleaned_count++;
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- skb_put(skb, rx_packet_len);
- dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
-
- I40E_RX_INCREMENT(rx_ring, i);
-
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- rx_ring->rx_stats.non_eop_descs++;
+ if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- }
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
continue;
}
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
- i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
- I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
- rx_ring->last_rx_timestamp = jiffies;
- }
+ if (i40e_cleanup_headers(rx_ring, skb))
+ continue;
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
-
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* populate checksum, VLAN, and protocol */
+ i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+ if (unlikely(
+ i40e_rx_is_fcoe(rx_ptype) &&
+ !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
dev_kfree_skb_any(skb);
continue;
}
#endif
+
+ vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
- } while (likely(total_rx_packets < budget));
+ /* update budget accounting */
+ total_rx_packets++;
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1849,6 +1846,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* guarantee a trip back through this routine if there was a failure */
return failure ? budget : total_rx_packets;
}
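/* Illustrative sketch (not part of this patch): the return convention
 * above relies on the NAPI contract that a handler reporting the full
 * budget is polled again, so returning "budget" after a buffer
 * allocation failure guarantees another pass at refilling the ring.
 * Hypothetical handler shape, not the driver's.
 */
#include <stdbool.h>

static int example_napi_poll(int work_done, int budget, bool failure)
{
	if (failure)
		return budget;		/* stay in polling mode and retry */
	return work_done;		/* < budget re-enables interrupts */
}

int main(void)
{
	return example_napi_poll(0, 64, true) == 64 ? 0 : 1;
}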
@@ -1975,9 +1973,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete = clean_complete &&
- i40e_clean_tx_irq(ring, vsi->work_limit);
- arm_wb = arm_wb || ring->arm_wb;
+ if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ clean_complete = false;
+ continue;
+ }
+ arm_wb |= ring->arm_wb;
ring->arm_wb = false;
}
@@ -1991,16 +1991,12 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned;
-
- if (ring_is_ps_enabled(ring))
- cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
- else
- cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
- /* if we didn't clean as many as budgeted, we must be done */
- clean_complete = clean_complete && (budget_per_ring > cleaned);
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
}
/* If work not completed, return budget and polling will return */
@@ -2247,15 +2243,13 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
u64 cd_cmd, cd_tso_len, cd_mss;
union {
@@ -2292,16 +2286,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
ip.v6->payload_len = 0;
}
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
/* determine offset of outer transport header */
l4_offset = l4.hdr - skb->data;
/* remove payload length from outer checksum */
- paylen = (__force u16)l4.udp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.udp->check, htonl(paylen));
}
/* reset pointers to inner headers */
@@ -2321,9 +2321,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
l4_offset = l4.hdr - skb->data;
/* remove payload length from inner checksum */
- paylen = (__force u16)l4.tcp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
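/* Illustrative sketch (not part of this patch): csum_replace_by_diff()
 * above removes the payload length that the stack folded into the TCP
 * pseudo-header checksum, since TSO hardware wants the checksum without
 * it. A userspace model of that one's-complement subtraction follows;
 * the real driver passes the length as htonl(paylen). All names are
 * hypothetical.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t fold(uint32_t x)
{
	while (x >> 16)
		x = (x & 0xffff) + (x >> 16);
	return x;
}

/* check stores ~sum; subtract a 32-bit word from the underlying sum */
static uint16_t csum_sub(uint16_t check, uint32_t word)
{
	uint32_t sum = fold((uint16_t)~check + fold(~word));

	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t pseudo = 0xc0a8 + 0x0001 + 0x0006;	/* header words */
	uint32_t paylen = 1460;
	uint16_t with_len = (uint16_t)~fold(pseudo + paylen);

	assert(csum_sub(with_len, paylen) == (uint16_t)~fold(pseudo));
	return 0;
}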
@@ -2405,7 +2404,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *hdr;
} l4;
unsigned char *exthdr;
- u32 offset, cmd = 0, tunnel = 0;
+ u32 offset, cmd = 0;
__be16 frag_off;
u8 l4_proto = 0;
@@ -2419,6 +2418,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
+ u32 tunnel = 0;
/* define outer network header type */
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -2436,13 +2436,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
&l4_proto, &frag_off);
}
- /* compute outer L3 header size */
- tunnel |= ((l4.hdr - ip.hdr) / 4) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
- /* switch IP header pointer from outer to inner header */
- ip.hdr = skb_inner_network_header(skb);
-
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
@@ -2453,6 +2446,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
default:
if (*tx_flags & I40E_TX_FLAGS_TSO)
return -1;
@@ -2461,12 +2459,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
return 0;
}
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
/* indicate if we need to offload outer UDP header */
if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
@@ -2716,6 +2722,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = first;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
@@ -2723,12 +2731,14 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma_unmap_len_set(tx_bi, len, size);
dma_unmap_addr_set(tx_bi, dma, dma);
+ /* align size to end of page */
+ max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
tx_desc->buffer_addr = cpu_to_le64(dma);
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
- I40E_MAX_DATA_PER_TXD, td_tag);
+ max_data, td_tag);
tx_desc++;
i++;
@@ -2739,9 +2749,10 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- dma += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
+ dma += max_data;
+ size -= max_data;
+ max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma);
}
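/* Illustrative sketch (not part of this patch): the "-dma & (N - 1)"
 * expression above yields the byte count from dma up to the next
 * N-aligned boundary (N a power of two), so the first chunk of a large
 * buffer is extended to end exactly on a 4K read-request boundary.
 * Hypothetical helper.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t to_next_boundary(uint64_t addr, uint64_t n)
{
	return -addr & (n - 1);		/* 0 when already aligned */
}

int main(void)
{
	assert(to_next_boundary(0x1000, 0x1000) == 0);
	assert(to_next_boundary(0x1001, 0x1000) == 0xfff);
	assert(to_next_boundary(0x1fff, 0x1000) == 1);
	return 0;
}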
@@ -2891,7 +2902,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2922,7 +2933,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index a9bd70537..b78c810d1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512 512 /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->wb.qword1.status_error_len &
+ cpu_to_le64(stat_err_bits));
+}
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
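/* Illustrative sketch (not part of this patch): i40e_test_staterr above
 * swaps the constant mask into little-endian once (cpu_to_le64 on a
 * constant folds away at compile time) instead of converting every
 * descriptor word with le64_to_cpu. Userspace approximation on a raw
 * little-endian field; names are hypothetical.
 */
#include <assert.h>
#include <stdint.h>

static int test_bits_le(uint64_t le_field, uint64_t le_mask)
{
	return !!(le_field & le_mask);	/* both already in LE domain */
}

int main(void)
{
	uint64_t desc_le = 1ull << 1;	/* pretend EOP-style status bit */

	assert(test_bits_le(desc_le, 1ull << 1));
	assert(!test_bits_le(desc_le, 1ull << 4));
	return 0;
}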
@@ -142,14 +161,41 @@ enum i40e_dyn_idx_t {
prefetch((n)); \
} while (0)
-#define i40e_rx_desc i40e_32byte_rx_desc
-
#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE 4096
+#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+ (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive. It is used to
+ * approximate the number of descriptors used per linear buffer. Note
+ * that this will overestimate in some cases, as it doesn't account for
+ * the fact that we will add up to 4K - 1 in aligning the 12K buffer;
+ * however, the error should not impact things much, as large buffers
+ * usually mean we will use fewer descriptors than there are frags in an
+ * skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+ const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+ unsigned int adjust = ~(u32)0;
+
+ /* if we rounded up on the reciprocal pull down the adjustment */
+ if ((max * reciprocal) > adjust)
+ adjust = ~(u32)(reciprocal - 1);
+
+ return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
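/* Illustrative sketch (not part of this patch): a userspace check that
 * the reciprocal trick in i40e_txd_use_count() matches DIV_ROUND_UP for
 * every linear-buffer size an skb can have (<= 64K). The constant
 * mirrors I40E_MAX_DATA_PER_TXD_ALIGNED; the helper is copied from the
 * header above.
 */
#include <assert.h>
#include <stdint.h>

#define MAX_ALIGNED	((16 * 1024 - 1) & ~(4096 - 1))	/* 12288 */

static unsigned int txd_use_count(unsigned int size)
{
	const unsigned int max = MAX_ALIGNED;
	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
	unsigned int adjust = ~(uint32_t)0;

	/* if we rounded up on the reciprocal pull down the adjustment */
	if ((max * reciprocal) > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
}

int main(void)
{
	for (unsigned int size = 0; size <= 65536; size++)
		assert(txd_use_count(size) ==
		       (size + MAX_ALIGNED - 1) / MAX_ALIGNED);
	return 0;
}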
@@ -184,10 +230,8 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
- void *hdr_buf;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
unsigned int page_offset;
};
@@ -216,22 +260,18 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_RX_PS_ENABLED,
- __I40E_RX_16BYTE_DESC_ENABLED,
};
-#define ring_is_ps_enabled(ring) \
- test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
- set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
- clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
- test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
- set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
- clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
@@ -258,16 +298,7 @@ struct i40e_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
-#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_HEADER_SPLIT 1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
-#define I40E_RX_SPLIT_L2 0x1
-#define I40E_RX_SPLIT_IP 0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */
u16 next_to_use;
@@ -301,6 +332,7 @@ struct i40e_ring {
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
+ u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -324,9 +356,7 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -377,7 +407,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
int count = 0, size = skb_headlen(skb);
for (;;) {
- count += TXD_USE_COUNT(size);
+ count += i40e_txd_use_count(size);
if (!nr_frags--)
break;
@@ -423,4 +453,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+ return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+ (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 3335f9d13..bd5f13bef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,7 +36,7 @@
#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
@@ -275,6 +275,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -549,6 +554,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1533,4 +1540,37 @@ struct i40e_lldp_variables {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT 47
+#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT 43
+#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT 35
+#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT 35
+#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT 34
+#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT 33
+#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT 31
+#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT 13
+#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT 12
+#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT 11
+#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT 10
+#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT 9
+#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT 8
+#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT 7
+#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT 6
+#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index ab866cf3d..c92a3bdee 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -80,10 +80,15 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
I40E_VIRTCHNL_OP_IWARP = 20,
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -157,6 +162,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -165,8 +171,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -325,6 +331,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
+
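/* Illustrative sketch (not part of this patch): the RSS key and LUT
 * messages above use a one-element trailing array, so a message carrying
 * n bytes occupies sizeof(struct) + n - 1 bytes; this is the same
 * arithmetic the PF applies in i40e_vc_validate_vf_msg(). The struct and
 * names below are hypothetical stand-ins.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct rss_key_msg {
	uint16_t vsi_id;
	uint16_t key_len;
	uint8_t key[1];			/* key bytes follow in the buffer */
};

static size_t rss_key_msg_len(uint16_t key_len)
{
	return sizeof(struct rss_key_msg) + key_len - 1;
}

int main(void)
{
	/* e.g. a 52-byte hash key: header plus 51 additional key bytes */
	assert(rss_key_msg_len(52) == sizeof(struct rss_key_msg) + 51);
	return 0;
}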
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 816c6bbf7..1fcafcfa8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -48,7 +48,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
int i;
for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
/* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
@@ -63,7 +63,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
}
/**
- * i40e_vc_notify_link_state
+ * i40e_vc_notify_vf_link_state
* @vf: pointer to the VF structure
*
* send a link status message to a single VF
@@ -74,7 +74,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
@@ -141,7 +141,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
return;
- abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+ abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
@@ -590,7 +590,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
}
rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
- /* set splitalways mode 10b */
+ /* set split mode 10b */
rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
}
@@ -665,8 +665,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
- u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
/* If the port VLAN has been configured and then the
@@ -688,12 +686,6 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
- f = i40e_add_filter(vsi, brdcast,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
- if (!f)
- dev_info(&pf->pdev->dev,
- "Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
@@ -860,7 +852,11 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
if (ret)
goto error_alloc;
total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
- set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ if (vf->trusted)
+ set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
/* store the total qps number for the runtime
* VF req validation
@@ -917,9 +913,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
bool rsd = false;
int i;
- u32 reg;
if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
return;
@@ -937,6 +933,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
i40e_flush(hw);
}
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ i40e_flush(hw);
if (i40e_quiesce_vf_pci(vf))
dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
@@ -988,6 +989,7 @@ complete_reset:
}
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, &pf->state);
}
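/* Illustrative sketch (not part of this patch): the VFLR status bits
 * cleared above live in an array of 32-bit registers, so the absolute VF
 * number splits into a register index (/ 32) and a bit position (% 32).
 * Hypothetical helper mirroring the reg_idx/bit_idx math.
 */
#include <assert.h>

static void vflr_index(unsigned int abs_vf_id,
		       unsigned int *reg_idx, unsigned int *bit_idx)
{
	*reg_idx = abs_vf_id / 32;	/* which GLGEN_VFLRSTAT register */
	*bit_idx = abs_vf_id % 32;	/* which bit within it */
}

int main(void)
{
	unsigned int reg, bit;

	vflr_index(37, &reg, &bit);
	assert(reg == 1 && bit == 5);
	return 0;
}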
@@ -1227,8 +1229,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
/* single place to detect unsuccessful return values */
if (v_retval) {
vf->num_invalid_msgs++;
- dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n",
- vf->vf_id, v_opcode, v_retval);
+ dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+ vf->vf_id, v_opcode, v_retval);
if (vf->num_invalid_msgs >
I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
dev_err(&pf->pdev->dev,
@@ -1246,9 +1248,9 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "Unable to send the message to VF %d aq_err %d\n",
- vf->vf_id, pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
return -EIO;
}
@@ -1306,8 +1308,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
- int i = 0, len = 0;
int num_vsis = 1;
+ int len = 0;
int ret;
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
@@ -1342,12 +1344,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
}
- if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;
} else {
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+ (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
@@ -1356,8 +1362,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
}
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev,
+ "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
+ vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ goto err;
+ }
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ }
if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
@@ -1368,16 +1382,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+
if (vf->lan_vsi_idx) {
- vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
- vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
- vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
+ vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
/* VFs only use TC 0 */
- vfres->vsi_res[i].qset_handle
+ vfres->vsi_res[0].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
- ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
- i++;
}
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
@@ -1407,6 +1423,25 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
}
/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ int num_vlans = 0;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+ num_vlans++;
+ }
+
+ return num_vlans;
+}
+
+/**
* i40e_vc_config_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1422,22 +1457,128 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
(struct i40e_virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- struct i40e_vsi *vsi;
+ struct i40e_mac_filter *f;
+ i40e_status aq_ret = 0;
bool allmulti = false;
- i40e_status aq_ret;
+ struct i40e_vsi *vsi;
+ bool alluni = false;
+ int aq_err = 0;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
- (vsi->type != I40E_VSI_FCOE)) {
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+ /* Lie to the VF on purpose. */
+ aq_ret = 0;
+ goto error_param;
+ }
+ /* Multicast promiscuous handling */
if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
- allmulti, NULL);
+
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
+ allmulti,
+ vf->port_vlan_id,
+ NULL);
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+ continue;
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
+ vsi->seid,
+ allmulti,
+ f->vlan,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
+ f->vlan,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ break;
+ }
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ allmulti, NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ goto error_param_int;
+ }
+ }
+
+ if (!aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set multicast promiscuous mode\n",
+ vf->vf_id);
+ if (allmulti)
+ set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ else
+ clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ }
+
+ if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ alluni = true;
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
+ alluni,
+ vf->port_vlan_id,
+ NULL);
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ aq_ret = 0;
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
+ aq_ret =
+ i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
+ vsi->seid,
+ alluni,
+ f->vlan,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ }
+ if (aq_ret)
+ dev_err(&pf->pdev->dev,
+ "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
+ f->vlan,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ allmulti, NULL,
+ true);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret)
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
+ vf->vf_id, info->flags,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+
+error_param_int:
+ if (!aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set unicast promiscuous mode\n",
+ vf->vf_id);
+ if (alluni)
+ set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ else
+ clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ }
error_param:
/* send the response to the VF */
@@ -1688,6 +1829,10 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_VLAN_PER_VF 8
+
/**
* i40e_check_vf_permission
* @vf: pointer to the VF info
@@ -1708,15 +1853,22 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
ret = I40E_ERR_INVALID_MAC_ADDR;
} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
!ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
/* If the host VMM administrator has set the VF MAC address
* administratively via the ndo_set_vf_mac command then deny
* permission to the VF to add or delete unicast MAC addresses.
+ * Unless the VF is privileged and then it can do whatever.
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
dev_err(&pf->pdev->dev,
- "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+ "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
+ ret = -EPERM;
+ } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted to add more functionality\n");
ret = -EPERM;
}
return ret;
@@ -1741,7 +1893,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -1780,6 +1931,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
+ } else {
+ vf->num_mac++;
}
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1815,7 +1968,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -1839,6 +1991,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
+ } else {
+ vf->num_mac--;
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1873,8 +2027,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0;
int i;
+ if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1898,6 +2057,19 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
/* add new VLAN filter */
int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+ if (!ret)
+ vf->num_vlan++;
+
+ if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+ true,
+ vfl->vlan_id[i],
+ NULL);
+ if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+ true,
+ vfl->vlan_id[i],
+ NULL);
if (ret)
dev_err(&pf->pdev->dev,
@@ -1929,7 +2101,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1950,6 +2121,19 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ if (!ret)
+ vf->num_vlan--;
+
+ if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+ false,
+ vfl->vlan_id[i],
+ NULL);
+ if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+ false,
+ vfl->vlan_id[i],
+ NULL);
if (ret)
dev_err(&pf->pdev->dev,
@@ -2029,6 +2213,135 @@ error_param:
}
/**
+ * i40e_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS key
+ **/
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_key *vrk =
+ (struct i40e_virtchnl_rss_key *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vrk->vsi_id;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
+err:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS LUT
+ **/
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_lut *vrl =
+ (struct i40e_virtchnl_rss_lut *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vrl->vsi_id;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_get_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Return the RSS HENA bits allowed by the hardware
+ **/
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_hena *vrh = NULL;
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+ int len = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ len = sizeof(struct i40e_virtchnl_rss_hena);
+
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ aq_ret = I40E_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ vrh->hena = i40e_pf_get_default_rss_hena(pf);
+err:
+ /* send the response back to the VF */
+ aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret, (u8 *)vrh, len);
+ kfree(vrh);
+ return aq_ret;
+}
+
+/**
+ * i40e_vc_set_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Set the RSS HENA bits for the VF
+ **/
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_hena *vrh =
+ (struct i40e_virtchnl_rss_hena *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(vrh->hena >> 32));
+
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ aq_ret);
+}
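
[Editor's note: the two register writes above split the 64-bit HENA bitmap across a pair of 32-bit registers. A standalone sketch of that split; the array below merely stands in for the I40E_VFQF_HENA1 register pair, nothing here is driver code.

#include <stdint.h>
#include <stdio.h>

static void write_hena(uint32_t regs[2], uint64_t hena)
{
	regs[0] = (uint32_t)hena;         /* low word  -> HENA1(0, vf_id) */
	regs[1] = (uint32_t)(hena >> 32); /* high word -> HENA1(1, vf_id) */
}

int main(void)
{
	uint32_t regs[2];

	write_hena(regs, 0x123456789abcdef0ULL);
	printf("low=0x%08x high=0x%08x\n", regs[0], regs[1]);
	return 0;
}
]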
+
+/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2041,7 +2354,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len;
+ int valid_len = 0;
/* Check if VF is disabled. */
if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
@@ -2053,13 +2366,10 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- valid_len = 0;
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(vf))
valid_len = sizeof(u32);
- else
- valid_len = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
@@ -2149,6 +2459,35 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
sizeof(struct i40e_virtchnl_iwarp_qv_info));
}
break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct i40e_virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_rss_key *vrk =
+ (struct i40e_virtchnl_rss_key *)msg;
+ if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct i40e_virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_rss_lut *vrl =
+ (struct i40e_virtchnl_rss_lut *)msg;
+ if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct i40e_virtchnl_rss_hena);
+ break;
/* These are always errors coming from the VF. */
case I40E_VIRTCHNL_OP_EVENT:
case I40E_VIRTCHNL_OP_UNKNOWN:
@@ -2175,11 +2514,11 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
* called from the common aeq/arq handler to
* process request from VF
**/
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
- unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
struct i40e_vf *vf;
int ret;
@@ -2247,6 +2586,19 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ ret = i40e_vc_config_rss_key(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+ break;
+
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -2268,9 +2620,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
**/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
- u32 reg, reg_idx, bit_idx, vf_id;
struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
struct i40e_vf *vf;
+ int vf_id;
if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
return 0;
@@ -2292,13 +2645,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx)) {
- /* clear the bit in GLGEN_VFLRSTAT */
- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-
- if (!test_bit(__I40E_DOWN, &pf->state))
- i40e_reset_vf(vf, true);
- }
+ if (reg & BIT(bit_idx))
+ /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
+ i40e_reset_vf(vf, true);
}
return 0;
@@ -2762,3 +3111,45 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
out:
return ret;
}
+
+/**
+ * i40e_ndo_set_vf_trust
+ * @netdev: network interface device structure of the pf
+ * @vf_id: VF identifier
+ * @setting: trust setting
+ *
+ * Enable or disable VF trust setting
+ **/
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ if (!vf)
+ return -EINVAL;
+ if (setting == vf->trusted)
+ goto out;
+
+ vf->trusted = setting;
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
+ dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+ vf_id, setting ? "" : "un");
+out:
+ return ret;
+}
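
[Editor's note: this handler is the driver's backend for the ndo_set_vf_trust netdev op declared in the header hunk below. With an iproute2 recent enough to know the trust attribute, it is driven from the PF side roughly as follows; the device name and VF number are placeholders.

# mark VF 0 on eth0 as trusted; "trust off" reverts it
ip link set dev eth0 vf 0 trust on
]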
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index e7b2fba03..875174141 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -61,6 +61,8 @@ enum i40e_vf_states {
I40E_VF_STAT_IWARPENA,
I40E_VF_STAT_FCOEENA,
I40E_VF_STAT_DISABLED,
+ I40E_VF_STAT_MC_PROMISC,
+ I40E_VF_STAT_UC_PROMISC,
};
/* VF capabilities */
@@ -75,7 +77,7 @@ struct i40e_vf {
struct i40e_pf *pf;
/* VF id in the PF space */
- u16 vf_id;
+ s16 vf_id;
/* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
struct i40e_virtchnl_version_info vf_ver;
@@ -88,6 +90,7 @@ struct i40e_vf {
struct i40e_virtchnl_ether_addr default_fcoe_addr;
u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */
+ bool trusted;
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always
@@ -108,6 +111,9 @@ struct i40e_vf {
bool link_forced;
bool link_up; /* only valid if VF link is forced */
bool spoofchk;
+ u16 num_mac;
+ u16 num_vlan;
+
/* RDMA Client */
struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
};
@@ -115,7 +121,7 @@ struct i40e_vf {
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
void i40e_reset_vf(struct i40e_vf *vf, bool flr);
@@ -127,6 +133,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index a3eae5d9a..1f9b3b5d9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -97,7 +97,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index aad8d6277..3114dcfa1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -78,17 +78,17 @@ struct i40e_aq_desc {
#define I40E_AQ_FLAG_EI_SHIFT 14
#define I40E_AQ_FLAG_FE_SHIFT 15
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
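
[Editor's note: these conversions are mechanical. BIT(n) is the kernel's shift helper from include/linux/bitops.h, shown here for reference; the 1UL makes the shift unsigned, which matters once n reaches bit 31.

#define BIT(nr) (1UL << (nr))
]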
/* error codes */
enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
@@ -426,6 +422,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -1582,27 +1579,6 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1649,11 +1625,11 @@ enum i40e_aq_phy_type {
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
};
struct i40e_aqc_module_desc {
@@ -1924,9 +1900,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+ BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
@@ -2195,7 +2171,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2215,14 +2191,14 @@ struct i40e_aqc_get_set_rss_key_data {
I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+ BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 771ac6ad8..8f6420400 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -58,6 +58,8 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ case I40E_DEV_ID_QSFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index ca8b58c3d..d34972bab 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -44,6 +44,8 @@
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_QSFP_I_X722 0x37D4
#define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index cea97daa8..79d99cd91 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -155,19 +155,21 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (i.e. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
- unsigned int total_packets = 0;
- unsigned int total_bytes = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = vsi->work_limit;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -197,7 +199,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
total_packets += tx_buf->gso_segs;
/* free the skb */
- dev_kfree_skb_any(tx_buf->skb);
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -267,7 +269,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -285,7 +287,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ !test_bit(__I40E_DOWN, &vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -494,7 +496,6 @@ err:
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
unsigned long bi_size;
u16 i;
@@ -502,48 +503,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (ring_is_ps_enabled(rx_ring)) {
- int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
- rx_bi = &rx_ring->rx_bi[0];
- if (rx_bi->hdr_buf) {
- dma_free_coherent(dev,
- bufsz,
- rx_bi->hdr_buf,
- rx_bi->dma);
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = 0;
- rx_bi->hdr_buf = NULL;
- }
- }
- }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->dma) {
- dma_unmap_single(dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
- }
+ struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
if (rx_bi->skb) {
dev_kfree_skb(rx_bi->skb);
rx_bi->skb = NULL;
}
- if (rx_bi->page) {
- if (rx_bi->page_dma) {
- dma_unmap_page(dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page_dma = 0;
- }
- __free_page(rx_bi->page);
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
- }
+ if (!rx_bi->page)
+ continue;
+
+ dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_pages(rx_bi->page, 0);
+
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
}
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -552,6 +527,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -576,37 +552,6 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
- * i40evf_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
- dma_addr_t dma;
- void *buffer;
- int buf_size;
- int i;
-
- if (rx_ring->rx_bi[0].hdr_buf)
- return;
- /* Make sure the buffers don't cross cache line boundaries. */
- buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
- buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
- &dma, GFP_KERNEL);
- if (!buffer)
- return;
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = dma + (i * buf_size);
- rx_bi->hdr_buf = buffer + (i * buf_size);
- }
-}
-
-/**
* i40evf_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -627,9 +572,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
- ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
- : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -640,6 +583,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
goto err;
}
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -658,6 +602,10 @@ err:
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -668,160 +616,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
*
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
**/
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
{
- u16 i = rx_ring->next_to_use;
- union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
- const int current_node = numa_node_id();
+ struct page *page = bi->page;
+ dma_addr_t dma;
- /* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
- return false;
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page)) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
+ /* alloc new page for storage */
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
- if (bi->skb) /* desc is in use */
- goto no_buffers;
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- /* If we've been moved to a different NUMA node, release the
- * page so we can get a new one on the current node.
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
*/
- if (bi->page && page_to_nid(bi->page) != current_node) {
- dma_unmap_page(rx_ring->dev,
- bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else if (bi->page) {
- rx_ring->rx_stats.page_reuse_count++;
- }
-
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- 0,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- bi->page_offset = 0;
- goto no_buffers;
- }
- bi->page_offset = 0;
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- i++;
- if (i == rx_ring->count)
- i = 0;
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_pages(page, 0);
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
}
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
- return false;
+ return true;
+}
-no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
- * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * i40evf_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns true if any errors on allocation
+ * Returns false if all allocations were successful, true if any fail
**/
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
- u16 i = rx_ring->next_to_use;
+ u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
- struct sk_buff *skb;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
- skb = bi->skb;
-
- if (!skb) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- goto no_buffers;
- }
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- bi->skb = skb;
- }
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ bi = &rx_ring->rx_bi[ntu];
- if (!bi->dma) {
- bi->dma = dma_map_single(rx_ring->dev,
- skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- bi->dma = 0;
- dev_kfree_skb(bi->skb);
- bi->skb = NULL;
- goto no_buffers;
- }
- }
+ do {
+ if (!i40e_alloc_mapped_page(rx_ring, bi))
+ goto no_buffers;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc->read.hdr_addr = 0;
- i++;
- if (i == rx_ring->count)
- i = 0;
- }
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ rx_desc++;
+ bi++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_bi;
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
return false;
no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
@@ -830,41 +740,35 @@ no_buffers:
}
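
[Editor's note: the refill loop above advances ntu with a compare-and-reset instead of a modulo. A standalone sketch of that wrap pattern; the ring size and starting index are arbitrary.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint16_t count = 512; /* ring size */
	uint16_t ntu = 510;
	int i;

	for (i = 0; i < 4; i++) {
		ntu++;
		if (ntu == count) /* cheaper than ntu % count on hot paths */
			ntu = 0;
		printf("next_to_use=%u\n", ntu);
	}
	return 0;
}
]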
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if (vlan_tag & VLAN_VID_MASK)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
- u32 rx_status,
- u32 rx_error,
- u16 rx_ptype)
+ union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
- bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+ struct i40e_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+ u8 ptype;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
/* Rx csum enabled and ip headers found? */
if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
@@ -904,20 +808,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
-
- ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall through */
+ default:
+ break;
+ }
return;
@@ -931,7 +838,7 @@ checksum_fail:
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -959,7 +866,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
u8 rx_ptype)
{
u32 hash;
- const __le64 rss_mask =
+ const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
@@ -973,313 +880,411 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
}
/**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
*
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
**/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- u16 i = rx_ring->next_to_clean;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u32 copysize;
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- do {
- struct i40e_rx_buffer *rx_bi;
- struct sk_buff *skb;
- u16 vlan_tag;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- i40evf_alloc_rx_buffers_ps(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+}
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
- */
- dma_rmb();
- /* sync header buffer for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- if (likely(!skb)) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_hdr_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- failure = true;
- break;
- }
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- }
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
- I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- /* sync half-page for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->page_dma,
- rx_bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
- rx_bi->skb = NULL;
- cleaned_count++;
- copysize = 0;
- if (rx_hbo || rx_sph) {
- int len;
-
- if (rx_hbo)
- len = I40E_RX_HDR_SIZE;
- else
- len = rx_header_len;
- memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
- } else if (skb->len == 0) {
- int len;
- unsigned char *va = page_address(rx_bi->page) +
- rx_bi->page_offset;
-
- len = min(rx_packet_len, rx_ring->rx_hdr_len);
- memcpy(__skb_put(skb, len), va, len);
- copysize = len;
- rx_packet_len -= len;
- }
- /* Get the rest of the data if this was a header split */
- if (rx_packet_len) {
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rx_bi->page,
- rx_bi->page_offset + copysize,
- rx_packet_len, I40E_RXBUFFER_2048);
-
- /* If the page count is more than 2, then both halves
- * of the page are used and we need to free it. Do it
- * here instead of in the alloc code. Otherwise one
- * of the half-pages might be released between now and
- * then, and we wouldn't know which one to use.
- * Don't call get_page and free_page since those are
- * both expensive atomic operations that just change
- * the refcount in opposite directions. Just give the
- * page to the stack; he can have our refcount.
- */
- if (page_count(rx_bi->page) > 2) {
- dma_unmap_page(rx_ring->dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page = NULL;
- rx_bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else {
- get_page(rx_bi->page);
- /* switch to the other half-page here; the
- * allocation code programs the right addr
- * into HW. If we haven't used this half-page,
- * the address won't be changed, and HW can
- * just use it next time through.
- */
- rx_bi->page_offset ^= PAGE_SIZE / 2;
- }
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- }
- I40E_RX_INCREMENT(rx_ring, i);
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- struct i40e_rx_buffer *next_buffer;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
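
[Editor's note: ALIGN(pull_len, sizeof(long)) in the copy above deliberately over-copies a few bytes so memcpy can move whole words. A userspace sketch of the rounding; ALIGN is redefined here, the kernel's macro lives in include/linux/kernel.h.

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int pull_len = 54; /* e.g. Ethernet + IPv4 + TCP headers */

	printf("copy %lu bytes to pull a %u-byte header\n",
	       (unsigned long)ALIGN(pull_len, sizeof(long)), pull_len);
	return 0;
}
]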
- next_buffer = &rx_ring->rx_bi[i];
- next_buffer->skb = skb;
- rx_ring->rx_stats.non_eop_descs++;
- continue;
- }
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ i40e_pull_tail(rx_ring, skb);
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ return false;
+}
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ new_buff = &rx_ring->rx_bi[nta];
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
-#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* transfer page from old buffer to new buffer */
+ *new_buff = *old_buff;
+}
+
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
- i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
+ /* will the data fit in the skb we allocated? if so, just
+ * copy it as it is pretty small anyway
+ */
+ if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
- } while (likely(total_rx_packets < budget));
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_packets += total_rx_packets;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!i40e_page_is_reserved(page)))
+ return true;
- return failure ? budget : total_rx_packets;
+ /* this page cannot be reused so discard it */
+ __free_pages(page, 0);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(i40e_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Even if we own the page, we are not allowed to use atomic_set()
+ * This would break get_page_unless_zero() users.
+ */
+ get_page(rx_buffer->page);
+
+ return true;
}
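
[Editor's note: on 4K-page systems the rx_buffer->page_offset ^= truesize above ping-pongs between the two 2K halves of one page, so a single page can service alternating descriptors for as long as the driver remains its sole owner. A toy illustration of the flip:

#include <stdio.h>

int main(void)
{
	unsigned int truesize = 2048; /* I40E_RXBUFFER_2048 */
	unsigned int page_offset = 0;
	int i;

	for (i = 0; i < 4; i++) {
		printf("hand out half at offset %u\n", page_offset);
		page_offset ^= truesize; /* 0 -> 2048 -> 0 -> ... */
	}
	return 0;
}
]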
/**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
*
- * Returns number of packets cleaned
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as calling the page recycle function if
+ * necessary.
+ */
+static inline
+struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ struct i40e_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ skb = rx_buffer->skb;
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ return NULL;
+ }
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ } else {
+ rx_buffer->skb = NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+ /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+ if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+ return false;
+
+ /* place skb in next buffer to be received */
+ rx_ring->rx_bi[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
**/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- u16 rx_packet_len;
bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u16 i;
- do {
- struct i40e_rx_buffer *rx_bi;
+ while (likely(total_rx_packets < budget)) {
+ union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
+ u32 rx_status;
u16 vlan_tag;
+ u8 rx_ptype;
+ u64 qword;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
- i40evf_alloc_rx_buffers_1buf(rx_ring,
- cleaned_count);
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
+ rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
+ /* status_error_len will always be zero for unused descriptors
+ * because it's cleared in cleanup, and overlaps with hdr_addr
+ * which is always zero because packet split isn't used; if the
+ * hardware wrote DD then it will be non-zero.
+ */
+ if (!rx_desc->wb.qword1.status_error_len)
+ break;
+
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* DD bit is set.
*/
dma_rmb();
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- prefetch(skb->data);
-
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc);
+ if (!skb)
+ break;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_bi->skb = NULL;
cleaned_count++;
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- skb_put(skb, rx_packet_len);
- dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
-
- I40E_RX_INCREMENT(rx_ring, i);
-
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- rx_ring->rx_stats.non_eop_descs++;
+ if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- }
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
continue;
}
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ if (i40e_cleanup_headers(rx_ring, skb))
+ continue;
+
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* populate checksum, VLAN, and protocol */
+ i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
- } while (likely(total_rx_packets < budget));
+ /* update budget accounting */
+ total_rx_packets++;
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1288,6 +1293,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* guarantee a trip back through this routine if there was a failure */
return failure ? budget : total_rx_packets;
}
@@ -1411,9 +1417,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete = clean_complete &&
- i40e_clean_tx_irq(ring, vsi->work_limit);
- arm_wb = arm_wb || ring->arm_wb;
+ if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ clean_complete = false;
+ continue;
+ }
+ arm_wb |= ring->arm_wb;
ring->arm_wb = false;
}
@@ -1427,16 +1435,12 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned;
-
- if (ring_is_ps_enabled(ring))
- cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
- else
- cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
- /* if we didn't clean as many as budgeted, we must be done */
- clean_complete = clean_complete && (budget_per_ring > cleaned);
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
}
/* If work not completed, return budget and polling will return */
@@ -1514,15 +1518,13 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
u64 cd_cmd, cd_tso_len, cd_mss;
union {
@@ -1559,16 +1561,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
ip.v6->payload_len = 0;
}
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
/* determine offset of outer transport header */
l4_offset = l4.hdr - skb->data;
/* remove payload length from outer checksum */
- paylen = (__force u16)l4.udp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.udp->check, htonl(paylen));
}
/* reset pointers to inner headers */
@@ -1588,9 +1596,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
l4_offset = l4.hdr - skb->data;
/* remove payload length from inner checksum */
- paylen = (__force u16)l4.tcp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
@@ -1630,7 +1637,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *hdr;
} l4;
unsigned char *exthdr;
- u32 offset, cmd = 0, tunnel = 0;
+ u32 offset, cmd = 0;
__be16 frag_off;
u8 l4_proto = 0;
@@ -1644,6 +1651,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
+ u32 tunnel = 0;
/* define outer network header type */
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -1661,13 +1669,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
&l4_proto, &frag_off);
}
- /* compute outer L3 header size */
- tunnel |= ((l4.hdr - ip.hdr) / 4) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
- /* switch IP header pointer from outer to inner header */
- ip.hdr = skb_inner_network_header(skb);
-
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
@@ -1678,6 +1679,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
default:
if (*tx_flags & I40E_TX_FLAGS_TSO)
return -1;
@@ -1686,12 +1692,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
return 0;
}
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
/* indicate if we need to offload outer UDP header */
if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
@@ -1935,6 +1949,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = first;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
@@ -1942,12 +1958,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma_unmap_len_set(tx_bi, len, size);
dma_unmap_addr_set(tx_bi, dma, dma);
+ /* align size to end of page */
+ max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
tx_desc->buffer_addr = cpu_to_le64(dma);
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
- I40E_MAX_DATA_PER_TXD, td_tag);
+ max_data, td_tag);
tx_desc++;
i++;
@@ -1958,9 +1976,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- dma += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
+ dma += max_data;
+ size -= max_data;
+ max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma);
}
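The new max_data adjustment uses the classic "-addr & (align - 1)" idiom: for a power-of-two alignment, negating the address and masking yields the byte count up to the next boundary, so the first chunk of an oversized buffer ends exactly on a 4K read-request boundary. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

#define READ_REQ_SIZE	4096u	/* matches I40E_MAX_READ_REQ_SIZE */

int main(void)
{
	uint64_t dma = 0x12345ab0;	/* arbitrary, not 4K aligned */

	/* Bytes from dma to the next 4K boundary; the low bits of -dma
	 * are the two's-complement "distance up" to that boundary. */
	uint64_t to_boundary = -dma & (READ_REQ_SIZE - 1);

	printf("dma=%#llx, %llu bytes to boundary, ends at %#llx\n",
	       (unsigned long long)dma,
	       (unsigned long long)to_boundary,
	       (unsigned long long)(dma + to_boundary));
	return 0;
}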
@@ -2109,7 +2128,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2140,7 +2159,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0429553fe..0112277e5 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512 512 /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask, which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->wb.qword1.status_error_len &
+ cpu_to_le64(stat_err_bits));
+}
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -142,14 +161,41 @@ enum i40e_dyn_idx_t {
prefetch((n)); \
} while (0)
-#define i40e_rx_desc i40e_32byte_rx_desc
-
#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K, which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE 4096
+#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+ (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive. It is used to
+ * approximate the number of descriptors used per linear buffer. Note
+ * that this will overestimate in some cases, as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer; however,
+ * the error should not impact things much, as large buffers usually mean
+ * we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+ const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+ unsigned int adjust = ~(u32)0;
+
+ /* if we rounded up on the reciprocal pull down the adjustment */
+ if ((max * reciprocal) > adjust)
+ adjust = ~(u32)(reciprocal - 1);
+
+ return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
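As the comment above says, the reciprocal multiply in i40e_txd_use_count() only approximates DIV_ROUND_UP and may overestimate (it does at exact multiples of 12K), which is safe since the count is only used to reserve descriptors; underestimating would be the dangerous direction. A userspace harness that checks this, reusing the same arithmetic:

#include <stdint.h>
#include <stdio.h>

#define MAX_ALIGNED	((16 * 1024 - 1) & ~(4096 - 1))	/* 12288 */

/* Same arithmetic as i40e_txd_use_count() in the hunk above. */
static unsigned int txd_use_count(unsigned int size)
{
	const unsigned int max = MAX_ALIGNED;
	const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
	unsigned int adjust = ~(uint32_t)0;

	/* if we rounded up on the reciprocal pull down the adjustment */
	if ((max * reciprocal) > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
}

int main(void)
{
	unsigned int size, under = 0, over = 0;

	for (size = 1; size <= 256 * 1024; size++) {
		unsigned int exact = (size + MAX_ALIGNED - 1) / MAX_ALIGNED;
		unsigned int approx = txd_use_count(size);

		under += approx < exact;	/* must never happen */
		over += approx > exact;		/* allowed, stays rare */
	}
	printf("underestimates=%u overestimates=%u\n", under, over);
	return 0;
}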
@@ -183,10 +229,8 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
- void *hdr_buf;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
unsigned int page_offset;
};
@@ -215,22 +259,18 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_RX_PS_ENABLED,
- __I40E_RX_16BYTE_DESC_ENABLED,
};
-#define ring_is_ps_enabled(ring) \
- test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
- set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
- clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
- test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
- set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
- clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for the virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
@@ -249,16 +289,7 @@ struct i40e_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
-#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_HEADER_SPLIT 1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
-#define I40E_RX_SPLIT_L2 0x1
-#define I40E_RX_SPLIT_IP 0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */
u16 next_to_use;
@@ -290,6 +321,7 @@ struct i40e_ring {
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
+ u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -313,9 +345,7 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -359,7 +389,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
int count = 0, size = skb_headlen(skb);
for (;;) {
- count += TXD_USE_COUNT(size);
+ count += i40e_txd_use_count(size);
if (!nr_frags--)
break;
@@ -405,4 +435,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+ return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+ (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 301fe2b6d..97f96e0d9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -36,7 +36,7 @@
#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
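The I40E_MASK() fix adds both the u32 cast and the argument parentheses. Without them, a mask reaching bit 31 sign-extends when the result lands in a 64-bit expression, and an unparenthesized argument expands with the wrong precedence. A small demonstration (the old form's behavior shown for common compilers; shifting a signed 1 into the sign bit is formally undefined):

#include <stdint.h>
#include <stdio.h>

#define OLD_MASK(mask, shift)	(mask << shift)
#define NEW_MASK(mask, shift)	((uint32_t)(mask) << (shift))

int main(void)
{
	/* Sign extension: bit 31 of a plain int poisons 64-bit users. */
	uint64_t old = OLD_MASK(0x1, 31);
	uint64_t fixed = NEW_MASK(0x1, 31);

	printf("old=%#llx fixed=%#llx\n",
	       (unsigned long long)old, (unsigned long long)fixed);

	/* Precedence: 1 + 1 << 4 is 1 + (1 << 4), not (1 + 1) << 4. */
	printf("old(1+1,4)=%#x fixed(1+1,4)=%#x\n",
	       (unsigned int)OLD_MASK(1 + 1, 4),
	       (unsigned int)NEW_MASK(1 + 1, 4));
	return 0;
}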
@@ -258,6 +258,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -522,6 +527,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1329,4 +1336,46 @@ enum i40e_reset_type {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director and flexible payload */
+#define I40E_FD_INSET_L3_SRC_SHIFT 47
+#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_L3_SRC_SHIFT)
+#define I40E_FD_INSET_L3_DST_SHIFT 35
+#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_L3_DST_SHIFT)
+#define I40E_FD_INSET_L4_SRC_SHIFT 34
+#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \
+ I40E_FD_INSET_L4_SRC_SHIFT)
+#define I40E_FD_INSET_L4_DST_SHIFT 33
+#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \
+ I40E_FD_INSET_L4_DST_SHIFT)
+#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31
+#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_VERIFY_TAG_SHIFT)
+
+#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17
+#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD50_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16
+#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD51_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15
+#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD52_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14
+#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD53_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13
+#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD54_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12
+#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD55_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11
+#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD56_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10
+#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD57_SHIFT)
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 3b9d20374..f04ce6cb7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -80,7 +80,12 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -154,6 +159,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF		0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -162,8 +168,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -322,6 +328,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+	u8 lut[1]; /* RSS lookup table */
+};
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
+
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index e657eccd2..76ed97db2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -67,8 +67,6 @@ struct i40e_vsi {
u16 rx_itr_setting;
u16 tx_itr_setting;
u16 qs_handle;
- u8 *rss_hkey_user; /* User configured hash keys */
- u8 *rss_lut_user; /* User configured lookup table entries */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -82,9 +80,6 @@ struct i40e_vsi {
#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
-#define I40EVF_RXBUFFER_128 128 /* Used for packet split */
-#define I40EVF_RXBUFFER_256 256 /* Used for packet split */
#define I40EVF_RXBUFFER_2048 2048
#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define I40EVF_MAX_AQ_BUF_SIZE 4096
@@ -210,9 +205,6 @@ struct i40evf_adapter {
u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
-#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
-#define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
@@ -222,6 +214,8 @@ struct i40evf_adapter {
#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
+#define I40EVF_FLAG_PROMISC_ON BIT(15)
+#define I40EVF_FLAG_ALLMULTI_ON BIT(16)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
@@ -239,8 +233,17 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
-#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
+/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
+#define I40EVF_FLAG_AQ_GET_HENA BIT(11)
+#define I40EVF_FLAG_AQ_SET_HENA BIT(12)
+#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13)
+#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14)
+#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15)
+#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16)
+#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17)
+#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
/* OS defined structs */
struct net_device *netdev;
@@ -256,10 +259,18 @@ struct i40evf_adapter {
bool netdev_registered;
bool link_up;
enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+ (_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
+ 0)
+/* RSS by the PF should be preferred over RSS via other methods. */
+#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \
+ (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -271,11 +282,16 @@ struct i40evf_adapter {
struct i40e_eth_stats current_stats;
struct i40e_vsi vsi;
u32 aq_wait_count;
+ /* RSS stuff */
+ u64 hena;
+ u16 rss_key_size;
+ u16 rss_lut_size;
+ u8 *rss_key;
+ u8 *rss_lut;
};
/* Ethtool Private Flags */
-#define I40EVF_PRIV_FLAGS_PS BIT(0)
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
@@ -314,11 +330,12 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter);
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
void i40evf_request_stats(struct i40evf_adapter *adapter);
void i40evf_request_reset(struct i40evf_adapter *adapter);
+void i40evf_get_hena(struct i40evf_adapter *adapter);
+void i40evf_set_hena(struct i40evf_adapter *adapter);
+void i40evf_set_rss_key(struct i40evf_adapter *adapter);
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
- u16 lut_size);
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
- u16 lut_size);
+int i40evf_config_rss(struct i40evf_adapter *adapter);
#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index dd4430aae..c9c202f6c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -63,12 +63,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
-static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "packet-split",
-};
-
-#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings)
-
/**
* i40evf_get_settings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
@@ -103,8 +97,6 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return I40EVF_STATS_LEN(netdev);
- else if (sset == ETH_SS_PRIV_FLAGS)
- return I40EVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -170,12 +162,6 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
p += ETH_GSTRING_LEN;
}
- } else if (sset == ETH_SS_PRIV_FLAGS) {
- for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
- memcpy(data, i40evf_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
}
}
@@ -225,7 +211,6 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, i40evf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -378,63 +363,6 @@ static int i40evf_set_coalesce(struct net_device *netdev,
}
/**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
-{
- struct i40e_hw *hw = &adapter->hw;
- u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
- /* We always hash on IP src and dest addresses */
- cmd->data = RXH_IP_SRC | RXH_IP_DST;
-
- switch (cmd->flow_type) {
- case TCP_V4_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case UDP_V4_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
-
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case IPV4_FLOW:
- break;
-
- case TCP_V6_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case UDP_V6_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
-
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case IPV6_FLOW:
- break;
- default:
- cmd->data = 0;
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -454,145 +382,8 @@ static int i40evf_get_rxnfc(struct net_device *netdev,
ret = 0;
break;
case ETHTOOL_GRXFH:
- ret = i40evf_get_rss_hash_opts(adapter, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-/**
- * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
- struct ethtool_rxnfc *nfc)
-{
- struct i40e_hw *hw = &adapter->hw;
- u32 flags = adapter->vf_res->vf_offload_flags;
-
- u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
- /* RSS does not support anything other than hashing
- * to queues on src and dst IPs and ports
- */
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- /* We need at least the IP SRC and DEST fields for hashing */
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST))
- return -EINVAL;
-
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- } else {
- return -EINVAL;
- }
- break;
- case TCP_V6_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- } else {
- return -EINVAL;
- }
- break;
- case UDP_V4_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- } else {
- return -EINVAL;
- }
- break;
- case UDP_V6_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- } else {
- return -EINVAL;
- }
- break;
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case SCTP_V4_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
- break;
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
- break;
- case IPV4_FLOW:
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- case IPV6_FLOW:
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- default:
- return -EINVAL;
- }
-
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
- i40e_flush(hw);
-
- return 0;
-}
-
-/**
- * i40evf_set_rxnfc - command to set RX flow classification rules
- * @netdev: network interface device structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the command is supported.
- **/
-static int i40evf_set_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *cmd)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = i40evf_set_rss_hash_opt(adapter, cmd);
+ netdev_info(netdev,
+			    "RSS hash info is not available to VF, use PF.\n");
break;
default:
break;
@@ -600,7 +391,6 @@ static int i40evf_set_rxnfc(struct net_device *netdev,
return ret;
}
-
/**
* i40evf_get_channels: get the number of channels supported by the device
* @netdev: network interface device structure
@@ -624,6 +414,19 @@ static void i40evf_get_channels(struct net_device *netdev,
}
/**
+ * i40evf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the RSS hash key size.
+ **/
+static u32 i40evf_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rss_key_size;
+}
+
+/**
* i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
* @netdev: network interface device structure
*
@@ -631,7 +434,9 @@ static void i40evf_get_channels(struct net_device *netdev,
**/
static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
{
- return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rss_lut_size;
}
/**
@@ -646,9 +451,6 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_vsi *vsi = &adapter->vsi;
- u8 *seed = NULL, *lut;
- int ret;
u16 i;
if (hfunc)
@@ -656,24 +458,13 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (!indir)
return 0;
- seed = key;
-
- lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
-
- ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
- if (ret)
- goto out;
+ memcpy(key, adapter->rss_key, adapter->rss_key_size);
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
- indir[i] = (u32)lut[i];
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ indir[i] = (u32)adapter->rss_lut[i];
-out:
- kfree(lut);
-
- return ret;
+ return 0;
}
/**
@@ -689,8 +480,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_vsi *vsi = &adapter->vsi;
- u8 *seed = NULL;
u16 i;
/* We do not allow change in unsupported parameters */
@@ -701,76 +490,14 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
return 0;
if (key) {
- if (!vsi->rss_hkey_user) {
- vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE,
- GFP_KERNEL);
- if (!vsi->rss_hkey_user)
- return -ENOMEM;
- }
- memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE);
- seed = vsi->rss_hkey_user;
- }
- if (!vsi->rss_lut_user) {
- vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE,
- GFP_KERNEL);
- if (!vsi->rss_lut_user)
- return -ENOMEM;
+ memcpy(adapter->rss_key, key, adapter->rss_key_size);
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
-
- return i40evf_config_rss(vsi, seed, vsi->rss_lut_user,
- I40EVF_HLUT_ARRAY_SIZE);
-}
-
-/**
- * i40evf_get_priv_flags - report device private flags
- * @dev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the i40e_priv_flags_strings
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 i40evf_get_priv_flags(struct net_device *dev)
-{
- struct i40evf_adapter *adapter = netdev_priv(dev);
- u32 ret_flags = 0;
-
- ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ?
- I40EVF_PRIV_FLAGS_PS : 0;
-
- return ret_flags;
-}
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = (u8)(indir[i]);
-/**
- * i40evf_set_priv_flags - set private flags
- * @dev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int i40evf_set_priv_flags(struct net_device *dev, u32 flags)
-{
- struct i40evf_adapter *adapter = netdev_priv(dev);
- bool reset_required = false;
-
- if ((flags & I40EVF_PRIV_FLAGS_PS) &&
- !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
- adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
- reset_required = true;
- } else if (!(flags & I40EVF_PRIV_FLAGS_PS) &&
- (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
- adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
- reset_required = true;
- }
-
- /* if needed, issue reset to cause things to take effect */
- if (reset_required)
- i40evf_schedule_reset(adapter);
-
- return 0;
+ return i40evf_config_rss(adapter);
}
static const struct ethtool_ops i40evf_ethtool_ops = {
@@ -782,18 +509,16 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_strings = i40evf_get_strings,
.get_ethtool_stats = i40evf_get_ethtool_stats,
.get_sset_count = i40evf_get_sset_count,
- .get_priv_flags = i40evf_get_priv_flags,
- .set_priv_flags = i40evf_set_priv_flags,
.get_msglevel = i40evf_get_msglevel,
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
.get_rxnfc = i40evf_get_rxnfc,
- .set_rxnfc = i40evf_set_rxnfc,
.get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
.get_rxfh = i40evf_get_rxfh,
.set_rxfh = i40evf_set_rxfh,
.get_channels = i40evf_get_channels,
+ .get_rxfh_key_size = i40evf_get_rxfh_key_size,
};
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4b70aae2f..16c552952 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 15
+#define DRV_VERSION_MINOR 5
+#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -641,28 +641,11 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i;
- int rx_buf_len;
-
-
- /* Set the RX buffer length according to the mode */
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ||
- netdev->mtu <= ETH_DATA_LEN)
- rx_buf_len = I40EVF_RXBUFFER_2048;
- else
- rx_buf_len = ALIGN(max_frame, 1024);
for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = rx_buf_len;
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- set_ring_ps_enabled(&adapter->rx_rings[i]);
- adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE;
- } else {
- clear_ring_ps_enabled(&adapter->rx_rings[i]);
- }
+ adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
}
}
@@ -943,6 +926,21 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
bottom_of_search_loop:
continue;
}
+
+ if (netdev->flags & IFF_PROMISC &&
+ !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
+ adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
+ else if (!(netdev->flags & IFF_PROMISC) &&
+ adapter->flags & I40EVF_FLAG_PROMISC_ON)
+ adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
+
+ if (netdev->flags & IFF_ALLMULTI &&
+ !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
+ adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+ else if (!(netdev->flags & IFF_ALLMULTI) &&
+ adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
+ adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
+
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
@@ -999,14 +997,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = &adapter->rx_rings[i];
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- i40evf_alloc_rx_headers(ring);
- i40evf_alloc_rx_buffers_ps(ring, ring->count);
- } else {
- i40evf_alloc_rx_buffers_1buf(ring, ring->count);
- }
- ring->next_to_use = ring->count - 1;
- writel(ring->next_to_use, ring->tail);
+ i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
}
}
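The old code primed the ring by pointing next_to_use at count - 1 and writing the tail register directly; the new call sizes the fill from the ring's own accounting instead. I40E_DESC_UNUSED() is the usual head/tail gap with one slot held back, so a full ring stays distinguishable from an empty one. A sketch of that accounting, assuming the macro body matches the driver's usual definition:

#include <stdio.h>

/* Free slots between next_to_use (sw fill point) and next_to_clean
 * (hw consumption point), minus one sentinel slot. */
static unsigned int desc_unused(unsigned int count,
				unsigned int next_to_use,
				unsigned int next_to_clean)
{
	return ((next_to_clean > next_to_use) ? 0 : count) +
	       next_to_clean - next_to_use - 1;
}

int main(void)
{
	printf("%u\n", desc_unused(512, 0, 0));		/* empty: 511 */
	printf("%u\n", desc_unused(512, 500, 10));	/* wrapped: 21 */
	return 0;
}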
@@ -1224,24 +1215,18 @@ out:
}
/**
- * i40e_config_rss_aq - Prepare for RSS using AQ commands
- * @vsi: vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * i40evf_config_rss_aq - Configure RSS keys and lut using AQ commands
+ * @adapter: board private structure
*
* Return 0 on success, negative on failure
**/
-static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
+static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_aqc_get_set_rss_key_data *rss_key =
+ (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
struct i40e_hw *hw = &adapter->hw;
int ret = 0;
- if (!vsi->id)
- return -EINVAL;
-
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
@@ -1249,198 +1234,82 @@ static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
return -EBUSY;
}
- if (seed) {
- struct i40e_aqc_get_set_rss_key_data *rss_key =
- (struct i40e_aqc_get_set_rss_key_data *)seed;
- ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key);
- if (ret) {
- dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
+ ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
}
- if (lut) {
- ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
+ ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+ adapter->rss_lut, adapter->rss_lut_size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
}
return ret;
}
/**
* i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
- const u8 *lut, u16 lut_size)
+static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
struct i40e_hw *hw = &adapter->hw;
+ u32 *dw;
u16 i;
- if (seed) {
- u32 *seed_dw = (u32 *)seed;
-
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
- }
-
- if (lut) {
- u32 *lut_dw = (u32 *)lut;
+ dw = (u32 *)adapter->rss_key;
+	for (i = 0; i < adapter->rss_key_size / 4; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
- if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
- return -EINVAL;
+ dw = (u32 *)adapter->rss_lut;
+	for (i = 0; i < adapter->rss_lut_size / 4; i++)
+ wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
- }
i40e_flush(hw);
return 0;
}
/**
- * * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Return 0 on success, negative on failure
- **/
-static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
- struct i40e_hw *hw = &adapter->hw;
- int ret = 0;
-
- if (seed) {
- ret = i40evf_aq_get_rss_key(hw, vsi->id,
- (struct i40e_aqc_get_set_rss_key_data *)seed);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot get RSS key, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
- }
-
- if (lut) {
- ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot get RSS lut, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
- }
-
- return ret;
-}
-
-/**
- * * i40evf_get_rss_reg - Get RSS keys and lut by reading registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
- **/
-static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
- const u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
- struct i40e_hw *hw = &adapter->hw;
- u16 i;
-
- if (seed) {
- u32 *seed_dw = (u32 *)seed;
-
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i));
- }
-
- if (lut) {
- u32 *lut_dw = (u32 *)lut;
-
- if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
- return -EINVAL;
-
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i));
- }
-
- return 0;
-}
-
-/**
* i40evf_config_rss - Configure RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
- **/
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
-
- if (RSS_AQ(adapter))
- return i40evf_config_rss_aq(vsi, seed, lut, lut_size);
- else
- return i40evf_config_rss_reg(vsi, seed, lut, lut_size);
-}
-
-/**
- * i40evf_get_rss - Get RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size)
+int i40evf_config_rss(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
- if (RSS_AQ(adapter))
- return i40evf_get_rss_aq(vsi, seed, lut, lut_size);
- else
- return i40evf_get_rss_reg(vsi, seed, lut, lut_size);
+ if (RSS_PF(adapter)) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
+ I40EVF_FLAG_AQ_SET_RSS_KEY;
+ return 0;
+ } else if (RSS_AQ(adapter)) {
+ return i40evf_config_rss_aq(adapter);
+ } else {
+ return i40evf_config_rss_reg(adapter);
+ }
}
/**
* i40evf_fill_rss_lut - Fill the lut with default values
- * @lut: Lookup table to be filled with
- * @rss_table_size: Lookup table size
- * @rss_size: Range of queue number for hashing
+ * @adapter: board private structure
**/
-static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
+static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
{
u16 i;
- for (i = 0; i < rss_table_size; i++)
- lut[i] = i % rss_size;
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = i % adapter->num_active_queues;
}
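With the user-configured copy gone, the default LUT simply round-robins hash buckets across the active queues. The effect, in isolation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t lut[64];	/* I40EVF_HLUT_ARRAY_SIZE on the VF */
	unsigned int i, num_queues = 4;

	/* Entry i steers hash bucket i to queue i % num_queues. */
	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_queues;

	for (i = 0; i < 8; i++)
		printf("lut[%u] -> queue %u\n", i, lut[i]);
	return 0;
}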
/**
@@ -1451,42 +1320,25 @@ static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
**/
static int i40evf_init_rss(struct i40evf_adapter *adapter)
{
- struct i40e_vsi *vsi = &adapter->vsi;
struct i40e_hw *hw = &adapter->hw;
- u8 seed[I40EVF_HKEY_ARRAY_SIZE];
- u64 hena;
- u8 *lut;
int ret;
- /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
- if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
- else
- hena = I40E_DEFAULT_RSS_HENA;
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+ if (!RSS_PF(adapter)) {
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ if (adapter->vf_res->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+ else
+ adapter->hena = I40E_DEFAULT_RSS_HENA;
- lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+ }
- /* Use user configured lut if there is one, otherwise use default */
- if (vsi->rss_lut_user)
- memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE);
- else
- i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE,
- adapter->num_active_queues);
+ i40evf_fill_rss_lut(adapter);
- /* Use user configured hash key if there is one, otherwise
- * user default.
- */
- if (vsi->rss_hkey_user)
- memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE);
- else
- netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
- ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
- kfree(lut);
+ netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
+ ret = i40evf_config_rss(adapter);
return ret;
}
@@ -1507,7 +1359,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
GFP_KERNEL);
if (!adapter->q_vectors)
- goto err_out;
+ return -ENOMEM;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = &adapter->q_vectors[q_idx];
@@ -1519,15 +1371,6 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
}
return 0;
-
-err_out:
- while (q_idx) {
- q_idx--;
- q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
- }
- kfree(adapter->q_vectors);
- return -ENOMEM;
}
/**
@@ -1610,19 +1453,16 @@ err_set_interrupt:
}
/**
- * i40evf_clear_rss_config_user - Clear user configurations of RSS
- * @vsi: Pointer to VSI structure
+ * i40evf_free_rss - Free memory used by RSS structs
+ * @adapter: board private structure
**/
-static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi)
+static void i40evf_free_rss(struct i40evf_adapter *adapter)
{
- if (!vsi)
- return;
-
- kfree(vsi->rss_hkey_user);
- vsi->rss_hkey_user = NULL;
+ kfree(adapter->rss_key);
+ adapter->rss_key = NULL;
- kfree(vsi->rss_lut_user);
- vsi->rss_lut_user = NULL;
+ kfree(adapter->rss_lut);
+ adapter->rss_lut = NULL;
}
/**
@@ -1756,6 +1596,39 @@ static void i40evf_watchdog_task(struct work_struct *work)
adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
+ i40evf_get_hena(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
+ i40evf_set_hena(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
+ i40evf_set_rss_key(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
+ i40evf_set_rss_lut(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
+ i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
+ I40E_FLAG_VF_MULTICAST_PROMISC);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
+ i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
+ goto watchdog_done;
+ }
+
+ if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
+ (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+ i40evf_set_promiscuous(adapter, 0);
+ goto watchdog_done;
+ }
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
@@ -2003,6 +1876,8 @@ static void i40evf_adminq_task(struct work_struct *work)
/* check for error indications */
val = rd32(hw, hw->aq.arq.len);
+ if (val == 0xdeadbeef) /* indicates device in reset */
+ goto freedom;
oldval = val;
if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
@@ -2259,6 +2134,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
+ NETIF_F_HW_VLAN_CTAG_RX |\
+ NETIF_F_HW_VLAN_CTAG_FILTER)
+
+/**
+ * i40evf_fix_features - fix up the netdev feature bits
+ * @netdev: our net device
+ * @features: desired feature bits
+ *
+ * Returns fixed-up feature bits
+ **/
+static netdev_features_t i40evf_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ features &= ~I40EVF_VLAN_FEATURES;
+ if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
+ features |= I40EVF_VLAN_FEATURES;
+ return features;
+}
+
static const struct net_device_ops i40evf_netdev_ops = {
.ndo_open = i40evf_open,
.ndo_stop = i40evf_close,
@@ -2271,6 +2168,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_tx_timeout = i40evf_tx_timeout,
.ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+ .ndo_fix_features = i40evf_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40evf_netpoll,
#endif
@@ -2307,57 +2205,61 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
**/
int i40evf_process_config(struct i40evf_adapter *adapter)
{
+ struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
+ struct i40e_vsi *vsi = &adapter->vsi;
int i;
/* got VF config message back from PF, now we can parse it */
- for (i = 0; i < adapter->vf_res->num_vsis; i++) {
- if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
- adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ for (i = 0; i < vfres->num_vsis; i++) {
+ if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &vfres->vsi_res[i];
}
if (!adapter->vsi_res) {
dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
return -ENODEV;
}
- if (adapter->vf_res->vf_offload_flags
- & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
- netdev->vlan_features = netdev->features &
- ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
- netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
- }
- netdev->features |= NETIF_F_HIGHDMA |
- NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CRC |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_RXCSUM |
- NETIF_F_GRO;
-
- netdev->hw_enc_features |= NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features &= ~NETIF_F_RXCSUM;
+ netdev->hw_enc_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
+
+ if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= netdev->hw_enc_features |
+ NETIF_F_TSO_MANGLEID;
+
+ /* Write features and hw_features separately to avoid polluting
+	 * with, or dropping, features that are set when we registered.
+ */
+ netdev->hw_features |= netdev->hw_enc_features;
+
+ netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+
+ /* disable VLAN features if not supported */
+ if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
+ netdev->features ^= I40EVF_VLAN_FEATURES;
adapter->vsi.id = adapter->vsi_res->vsi_id;
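Note the closing "features ^= I40EVF_VLAN_FEATURES" above: XOR acts as a clear only because the VLAN bits were unconditionally OR'd into features a few lines earlier, so they are guaranteed set. (i40evf_fix_features() earlier in this patch uses the more defensive clear-then-set form.) In isolation, with illustrative bit values:

#include <stdint.h>
#include <stdio.h>

#define VLAN_TX		(1u << 0)
#define VLAN_RX		(1u << 1)
#define VLAN_FILTER	(1u << 2)
#define VLAN_ALL	(VLAN_TX | VLAN_RX | VLAN_FILTER)

int main(void)
{
	uint32_t features = (1u << 8) | VLAN_ALL;	/* VLAN bits known set */
	int vlan_supported = 0;

	/* XOR clears bits that are known to be set; it would *set* them
	 * if they were clear, which is why the earlier OR matters. */
	if (!vlan_supported)
		features ^= VLAN_ALL;

	printf("features=%#x\n", features);
	return 0;
}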
@@ -2368,8 +2270,16 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- adapter->vsi.netdev = adapter->netdev;
- adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
+ vsi->netdev = adapter->netdev;
+ vsi->qs_handle = adapter->vsi_res->qset_handle;
+ if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ adapter->rss_key_size = vfres->rss_key_size;
+ adapter->rss_lut_size = vfres->rss_lut_size;
+ } else {
+ adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
+ adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
+ }
+
return 0;
}
@@ -2502,11 +2412,6 @@ static void i40evf_init_task(struct work_struct *work)
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
- adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
- adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE;
-
- /* Default to single buffer rx, can be changed through ethtool. */
- adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
@@ -2565,6 +2470,11 @@ static void i40evf_init_task(struct work_struct *work)
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
+ adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
+ adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
+ if (!adapter->rss_key || !adapter->rss_lut)
+ goto err_mem;
+
if (RSS_AQ(adapter)) {
adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
@@ -2575,7 +2485,8 @@ static void i40evf_init_task(struct work_struct *work)
restart:
schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
return;
-
+err_mem:
+ i40evf_free_rss(adapter);
err_register:
i40evf_free_misc_irq(adapter);
err_sw_init:
@@ -2838,11 +2749,11 @@ static void i40evf_remove(struct pci_dev *pdev)
adapter->state = __I40EVF_REMOVE;
adapter->aq_required = 0;
i40evf_request_reset(adapter);
- msleep(20);
+ msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
if (!i40evf_asq_done(hw)) {
i40evf_request_reset(adapter);
- msleep(20);
+ msleep(50);
}
if (adapter->msix_entries) {
@@ -2857,8 +2768,7 @@ static void i40evf_remove(struct pci_dev *pdev)
flush_scheduled_work();
- /* Clear user configurations for RSS */
- i40evf_clear_rss_config_user(&adapter->vsi);
+ i40evf_free_rss(adapter);
if (hw->aq.asq.count)
i40evf_shutdown_adminq(hw);
@@ -2869,7 +2779,6 @@ static void i40evf_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_regions(pdev);
-
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 488e738f7..f13445691 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -270,10 +270,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- vqpi->rxq.splithdr_enabled = true;
- vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE;
- }
vqpi++;
}
@@ -645,6 +641,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
struct i40e_virtchnl_promisc_info vpi;
+ int promisc_all;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -652,6 +649,27 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
adapter->current_op);
return;
}
+
+ promisc_all = I40E_FLAG_VF_UNICAST_PROMISC |
+ I40E_FLAG_VF_MULTICAST_PROMISC;
+ if ((flags & promisc_all) == promisc_all) {
+ adapter->flags |= I40EVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
+ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+ }
+
+ if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) {
+ adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+ dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+ }
+
+ if (!flags) {
+ adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ }
+
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags;
@@ -681,6 +699,115 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
/* if the request failed, don't lock out others */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
+
+/**
+ * i40evf_get_hena
+ * @adapter: adapter structure
+ *
+ * Request hash enable capabilities from PF
+ **/
+void i40evf_get_hena(struct i40evf_adapter *adapter)
+{
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ NULL, 0);
+}
+
+/**
+ * i40evf_set_hena
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash capabilities
+ **/
+void i40evf_set_hena(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_hena vrh;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ vrh.hena = adapter->hena;
+ adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ (u8 *)&vrh, sizeof(vrh));
+}
+
+/**
+ * i40evf_set_rss_key
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash key
+ **/
+void i40evf_set_rss_key(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_key *vrk;
+ int len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ len = sizeof(struct i40e_virtchnl_rss_key) +
+ (adapter->rss_key_size * sizeof(u8)) - 1;
+ vrk = kzalloc(len, GFP_KERNEL);
+ if (!vrk)
+ return;
+ vrk->vsi_id = adapter->vsi.id;
+ vrk->key_len = adapter->rss_key_size;
+ memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
+
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ (u8 *)vrk, len);
+ kfree(vrk);
+}
+
+/**
+ * i40evf_set_rss_lut
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS lookup table
+ **/
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_lut *vrl;
+ int len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ len = sizeof(struct i40e_virtchnl_rss_lut) +
+ (adapter->rss_lut_size * sizeof(u8)) - 1;
+ vrl = kzalloc(len, GFP_KERNEL);
+ if (!vrl)
+ return;
+ vrl->vsi_id = adapter->vsi.id;
+ vrl->lut_entries = adapter->rss_lut_size;
+ memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ (u8 *)vrl, len);
+ kfree(vrl);
+}
+
/**
* i40evf_request_reset
* @adapter: adapter structure
@@ -820,6 +947,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
+ struct i40e_virtchnl_rss_hena *vrh =
+ (struct i40e_virtchnl_rss_hena *)msg;
+ if (msglen == sizeof(*vrh))
+ adapter->hena = vrh->hena;
+ else
+ dev_warn(&adapter->pdev->dev,
+ "Invalid message %d from PF\n", v_opcode);
+ }
+ break;
default:
if (v_opcode != adapter->current_op)
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
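Aside on the message sizing in i40evf_set_rss_key() and i40evf_set_rss_lut() above: the virtchnl structures end in a one-element u8 array, so the allocation length is the structure size plus the variable payload minus the placeholder byte already counted by sizeof(). A minimal sketch of the pattern (struct and field names illustrative, not the exact virtchnl layout):

    /* trailing one-byte array stands in for a variable-length payload */
    struct rss_msg {
            u16 vsi_id;
            u16 key_len;
            u8  key[1];     /* placeholder byte counted by sizeof() */
    };
    len = sizeof(struct rss_msg) + key_size - 1;  /* -1 drops the placeholder */
    msg = kzalloc(len, GFP_KERNEL);               /* zeroed; may sleep */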
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a23aa6704..a61447fd7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -361,7 +361,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
if (size > 15)
size = 15;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
@@ -380,7 +380,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
16 : 8;
break;
}
- if (nvm->word_size == (1 << 15))
+ if (nvm->word_size == BIT(15))
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
@@ -391,7 +391,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
nvm->ops.write = igb_write_nvm_spi;
nvm->ops.validate = igb_validate_nvm_checksum;
nvm->ops.update = igb_update_nvm_checksum;
- if (nvm->word_size < (1 << 15))
+ if (nvm->word_size < BIT(15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
@@ -2107,7 +2107,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
/* The PF can spoof - it has to in order to
* support emulation mode NICs
*/
- reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS));
} else {
reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
E1000_DTXSWC_VLAN_SPOOF_MASK);
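The mechanical `1 << n` to BIT(n) conversions in this and the following igb files rely on the kernel's bit helper, defined in include/linux/bitops.h of this era roughly as:

    #define BIT(nr)                 (1UL << (nr))

Because the operand is unsigned long, BIT(31) never shifts into the sign bit of a plain int, which is the main correctness gain over the open-coded form.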
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index de8805a2a..199ff9820 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -168,16 +168,16 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
@@ -186,8 +186,8 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
/* ETQF register bit definitions */
-#define E1000_ETQF_FILTER_ENABLE (1 << 26)
-#define E1000_ETQF_1588 (1 << 30)
+#define E1000_ETQF_FILTER_ENABLE BIT(26)
+#define E1000_ETQF_1588 BIT(30)
/* FTQF register bit definitions */
#define E1000_FTQF_VF_BP 0x00008000
@@ -203,16 +203,16 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */
/* Easy defines for setting default pool, would normally be left a zero */
#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
/* Other useful VMD_CTL register defines */
-#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
-#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
-#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+#define E1000_VT_CTL_IGNORE_MAC BIT(28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29)
+#define E1000_VT_CTL_VM_REPL_EN BIT(30)
/* Per VM Offload register setup */
#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
@@ -252,7 +252,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXCTL_MDP_EN 0x0020
#define E1000_DTXCTL_SPOOF_INT 0x0040
-#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14)
#define ALL_QUEUES 0xFFFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index e9f23ee8f..2997c443c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -530,65 +530,65 @@
/* Time Sync Interrupt Cause/Mask Register Bits */
-#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
-#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
-#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */
-#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
-#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
-#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
-#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
-#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */
+#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */
+#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */
+#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */
#define TSYNC_INTERRUPTS TSINTR_TXTS
#define E1000_TSICR_TXTS TSINTR_TXTS
/* TSAUXC Configuration Bits */
-#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
-#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
-#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
-#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
-#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
-#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
-#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
-#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
-#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */
-#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */
-#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */
-#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */
+#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */
+#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */
+#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp 0 Taken. */
+#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 1. */
+#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp 1 Taken. */
+#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */
+#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */
/* SDP Configuration Bits */
-#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */
-#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */
-#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */
-#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */
-#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */
-#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */
-#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */
-#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */
-#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */
-#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */
-#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */
-#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */
-#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */
-#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */
-#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */
-#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */
-#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */
-#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */
-#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */
-#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */
-#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */
-#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */
-#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */
-#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
+#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */
+#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */
+#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */
+#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */
+#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */
#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
@@ -997,8 +997,8 @@
#define E1000_M88E1543_FIBER_CTRL 0x0
#define E1000_EEE_ADV_DEV_I354 7
#define E1000_EEE_ADV_ADDR_I354 60
-#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
-#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */
#define E1000_PCS_STATUS_DEV_I354 3
#define E1000_PCS_STATUS_ADDR_I354 1
#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */
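The SDP defines above take a `u` suffix instead of BIT() because they encode multi-bit field values rather than single flags; the suffix still matters, since left-shifting a signed 1 into bit 31 is undefined behavior in C. A two-line illustration:

    reg = 1 << 31;          /* undefined: shifts into the sign bit of int */
    reg = 1u << 31;         /* well-defined: unsigned arithmetic */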
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 07cf4fe58..5010e2232 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -212,7 +212,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
* bits[4-0]: which bit in the register
*/
regidx = vlan / 32;
- vfta_delta = 1 << (vlan % 32);
+ vfta_delta = BIT(vlan % 32);
vfta = adapter->shadow_vfta[regidx];
/* vfta_delta represents the difference between the current value
@@ -243,12 +243,12 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
bits = rd32(E1000_VLVF(vlvf_index));
/* set the pool bit */
- bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+ bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
if (vlan_on)
goto vlvf_update;
/* clear the pool bit */
- bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+ bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
/* Clear VFTA first, then disable VLVF. Otherwise
@@ -427,7 +427,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
mta = array_rd32(E1000_MTA, hash_reg);
- mta |= (1 << hash_bit);
+ mta |= BIT(hash_bit);
array_wr32(E1000_MTA, hash_reg, mta);
wrfl();
@@ -527,7 +527,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
mc_addr_list += (ETH_ALEN);
}
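The VFTA and MTA updates above follow the usual kernel bitmap idiom: 4096 VLAN IDs map onto 128 32-bit registers, so the register index is the quotient and the bit position the remainder. Condensed from the code above:

    regidx = vlan / 32;             /* which 32-bit VFTA register */
    mask   = BIT(vlan % 32);        /* which bit within that register */
    adapter->shadow_vfta[regidx] |= mask;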
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 10f5c9e01..00e263f0c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -302,9 +302,9 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
u32 vflre = rd32(E1000_VFLRE);
s32 ret_val = -E1000_ERR_MBX;
- if (vflre & (1 << vf_number)) {
+ if (vflre & BIT(vf_number)) {
ret_val = 0;
- wr32(E1000_VFLRE, (1 << vf_number));
+ wr32(E1000_VFLRE, BIT(vf_number));
hw->mbx.stats.rsts++;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index e8280d0d7..3582c5cf8 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -72,7 +72,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
u32 eecd = rd32(E1000_EECD);
u32 mask;
- mask = 0x01 << (count - 1);
+ mask = 1u << (count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 969a6ddaf..9b622b33b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -91,10 +91,10 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define I82580_ADDR_REG 16
#define I82580_CFG_REG 22
-#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
+#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15)
+#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */
#define I82580_CTRL_REG 23
-#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10)
+#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10)
/* 82580 specific PHY registers */
#define I82580_PHY_CTRL_2 18
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9413fa613..b9609afa5 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -91,6 +91,14 @@ struct igb_adapter;
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
+/* Transmit and receive latency (for PTP timestamps) */
+#define IGB_I210_TX_LATENCY_10 9542
+#define IGB_I210_TX_LATENCY_100 1024
+#define IGB_I210_TX_LATENCY_1000 178
+#define IGB_I210_RX_LATENCY_10 20662
+#define IGB_I210_RX_LATENCY_100 2213
+#define IGB_I210_RX_LATENCY_1000 448
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -169,7 +177,7 @@ enum igb_tx_flags {
* maintain a power of two alignment we have to limit ourselves to 32K.
*/
#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
@@ -466,21 +474,21 @@ struct igb_adapter {
u16 eee_advert;
};
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
-#define IGB_FLAG_DMAC (1 << 4)
-#define IGB_FLAG_PTP (1 << 5)
-#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
-#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
-#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
-#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
-#define IGB_FLAG_MEDIA_RESET (1 << 10)
-#define IGB_FLAG_MAS_CAPABLE (1 << 11)
-#define IGB_FLAG_MAS_ENABLE (1 << 12)
-#define IGB_FLAG_HAS_MSIX (1 << 13)
-#define IGB_FLAG_EEE (1 << 14)
+#define IGB_FLAG_HAS_MSI BIT(0)
+#define IGB_FLAG_DCA_ENABLED BIT(1)
+#define IGB_FLAG_QUAD_PORT_A BIT(2)
+#define IGB_FLAG_QUEUE_PAIRS BIT(3)
+#define IGB_FLAG_DMAC BIT(4)
+#define IGB_FLAG_PTP BIT(5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
+#define IGB_FLAG_WOL_SUPPORTED BIT(8)
+#define IGB_FLAG_NEED_LINK_UPDATE BIT(9)
+#define IGB_FLAG_MEDIA_RESET BIT(10)
+#define IGB_FLAG_MAS_CAPABLE BIT(11)
+#define IGB_FLAG_MAS_ENABLE BIT(12)
+#define IGB_FLAG_HAS_MSIX BIT(13)
+#define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
/* Media Auto Sense */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7982243d1..64e91c575 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -466,7 +466,7 @@ static void igb_get_regs(struct net_device *netdev,
memset(p, 0, IGB_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = rd32(E1000_CTRL);
@@ -1448,7 +1448,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Test each interrupt */
for (; i < 31; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (!(mask & ics_mask))
continue;
@@ -2411,19 +2411,19 @@ static int igb_get_ts_info(struct net_device *dev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
- info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
/* 82576 does not support timestamping all packets. */
if (adapter->hw.mac.type >= e1000_82580)
- info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
else
info->rx_filters |=
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
default:
@@ -2831,7 +2831,8 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) {
- status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
+ status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2,
+ &dataword[i]);
if (status) {
/* Error occurred while reading module */
kfree(dataword);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 55a1405cb..ef3d642f5 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -50,6 +50,7 @@
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
+#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -150,7 +151,7 @@ static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_tx_irq(struct igb_q_vector *);
+static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
@@ -382,7 +383,7 @@ static void igb_dump(struct igb_adapter *adapter)
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, netdev->trans_start, netdev->last_rx);
+ netdev->state, dev_trans_start(netdev), netdev->last_rx);
}
/* Print Registers */
@@ -835,7 +836,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
igb_write_ivar(hw, msix_vector,
tx_queue & 0x7,
((tx_queue & 0x8) << 1) + 8);
- q_vector->eims_value = 1 << msix_vector;
+ q_vector->eims_value = BIT(msix_vector);
break;
case e1000_82580:
case e1000_i350:
@@ -856,7 +857,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
igb_write_ivar(hw, msix_vector,
tx_queue >> 1,
((tx_queue & 0x1) << 4) + 8);
- q_vector->eims_value = 1 << msix_vector;
+ q_vector->eims_value = BIT(msix_vector);
break;
default:
BUG();
@@ -918,7 +919,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
E1000_GPIE_NSICR);
/* enable msix_other interrupt */
- adapter->eims_other = 1 << vector;
+ adapter->eims_other = BIT(vector);
tmp = (vector++ | E1000_IVAR_VALID) << 8;
wr32(E1000_IVAR_MISC, tmp);
@@ -2086,6 +2087,40 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
+#define IGB_MAX_MAC_HDR_LEN 127
+#define IGB_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+igb_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
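The 127/511 limits in igb_features_check() come from the advanced Tx context descriptor: assuming the usual igb descriptor layout, MACLEN is a 7-bit field and IPLEN a 9-bit field inside vlan_macip_lens, so longer headers simply cannot be described and the checksum/TSO offloads have to fall back to software. Roughly:

    /* vlan_macip_lens packing (assumed): [31:16] VLAN, [15:9] MACLEN, [8:0] IPLEN */
    vlan_macip_lens  = network_hdr_len;                           /* <= 511 */
    vlan_macip_lens |= mac_hdr_len << E1000_ADVTXD_MACLEN_SHIFT;  /* <= 127 */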
+
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -2110,7 +2145,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
.ndo_fdb_add = igb_ndo_fdb_add,
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = igb_features_check,
};
/**
@@ -2376,38 +2411,43 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_TSO6 |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_HW_CSUM;
if (hw->mac.type >= e1000_82576)
netdev->features |= NETIF_F_SCTP_CRC;
+#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
+
/* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= netdev->features |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_RXALL;
if (hw->mac.type >= e1000_i350)
netdev->hw_features |= NETIF_F_NTUPLE;
- /* set this bit last since it cannot be part of hw_features */
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
- netdev->vlan_features |= NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_HW_CSUM |
- NETIF_F_SCTP_CRC;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
- netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -2442,9 +2482,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- /* copy the MAC address out of the NVM */
- if (hw->mac.ops.read_mac_addr(hw))
- dev_err(&pdev->dev, "NVM Read Error\n");
+ if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
+ /* copy the MAC address out of the NVM */
+ if (hw->mac.ops.read_mac_addr(hw))
+ dev_err(&pdev->dev, "NVM Read Error\n");
+ }
memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
@@ -4061,7 +4103,7 @@ static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
u32 vlvf = rd32(E1000_VLVF(i));
- vlvf |= 1 << pf_id;
+ vlvf |= BIT(pf_id);
wr32(E1000_VLVF(i), vlvf);
}
@@ -4088,7 +4130,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
/* guarantee that we don't scrub out management VLAN */
vid = adapter->mng_vlan_id;
if (vid >= vid_start && vid < vid_end)
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
if (!adapter->vfs_allocated_count)
goto set_vfta;
@@ -4107,7 +4149,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
if (vlvf & E1000_VLVF_VLANID_ENABLE) {
/* record VLAN ID in VFTA */
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
/* if PF is part of this then continue */
if (test_bit(vid, adapter->active_vlans))
@@ -4115,7 +4157,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
}
/* remove PF from the pool */
- bits = ~(1 << pf_id);
+ bits = ~BIT(pf_id);
bits &= rd32(E1000_VLVF(i));
wr32(E1000_VLVF(i), bits);
}
@@ -4273,13 +4315,13 @@ static void igb_spoof_check(struct igb_adapter *adapter)
return;
for (j = 0; j < adapter->vfs_allocated_count; j++) {
- if (adapter->wvbr & (1 << j) ||
- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+ if (adapter->wvbr & BIT(j) ||
+ adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
dev_warn(&adapter->pdev->dev,
"Spoof event(s) detected on VF %d\n", j);
adapter->wvbr &=
- ~((1 << j) |
- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ ~(BIT(j) |
+ BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
}
}
}
@@ -4839,9 +4881,18 @@ static int igb_tso(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -4854,45 +4905,52 @@ static int igb_tso(struct igb_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IGB_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM |
IGB_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IGB_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* MSS L4LEN IDX */
- mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
/* VLAN MACLEN IPLEN */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
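The rewritten igb_tso() derives header sizes by pointer arithmetic instead of the tcp_hdrlen()/skb_transport_offset() helpers. As a worked example for an untagged IPv4/TCP frame with no TCP options:

    l4_offset = 14 + 20;                /* Ethernet + IPv4 headers = 34 */
    /* tcp->doff counts 32-bit words; 5 for a bare 20-byte TCP header */
    hdr_len   = (5 * 4) + l4_offset;    /* 54 header bytes per segment */
    paylen    = skb->len - l4_offset;   /* length folded out of the checksum */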
@@ -5960,11 +6018,11 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
/* create mask for VF and other pools */
pool_mask = E1000_VLVF_POOLSEL_MASK;
- vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+ vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
/* drop PF from pool bits */
- pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT +
- adapter->vfs_allocated_count));
+ pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count);
/* Find the vlan filter for this id */
for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
@@ -5987,7 +6045,7 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
goto update_vlvf;
vid = vlvf & E1000_VLVF_VLANID_MASK;
- vfta_mask = 1 << (vid % 32);
+ vfta_mask = BIT(vid % 32);
/* clear bit from VFTA */
vfta = adapter->shadow_vfta[vid / 32];
@@ -6024,7 +6082,7 @@ static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
return idx;
}
-void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
+static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
{
struct e1000_hw *hw = &adapter->hw;
u32 bits, pf_id;
@@ -6038,13 +6096,13 @@ void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
* entry other than the PF.
*/
pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
- bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK;
+ bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
bits &= rd32(E1000_VLVF(idx));
/* Disable the filter so this falls into the default pool. */
if (!bits) {
if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
- wr32(E1000_VLVF(idx), 1 << pf_id);
+ wr32(E1000_VLVF(idx), BIT(pf_id));
else
wr32(E1000_VLVF(idx), 0);
}
@@ -6228,9 +6286,9 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
/* enable transmit and receive for vf */
reg = rd32(E1000_VFTE);
- wr32(E1000_VFTE, reg | (1 << vf));
+ wr32(E1000_VFTE, reg | BIT(vf));
reg = rd32(E1000_VFRE);
- wr32(E1000_VFRE, reg | (1 << vf));
+ wr32(E1000_VFRE, reg | BIT(vf));
adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
@@ -6522,13 +6580,14 @@ static int igb_poll(struct napi_struct *napi, int budget)
igb_update_dca(q_vector);
#endif
if (q_vector->tx.ring)
- clean_complete = igb_clean_tx_irq(q_vector);
+ clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
int cleaned = igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
- clean_complete &= (cleaned < budget);
+ if (cleaned >= budget)
+ clean_complete = false;
}
/* If all work not completed, return budget and keep polling */
@@ -6545,10 +6604,11 @@ static int igb_poll(struct napi_struct *napi, int budget)
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
+ * @napi_budget: Used to determine if we are in netpoll
*
* returns true if ring is completely cleaned
**/
-static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx.ring;
@@ -6587,7 +6647,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -7574,7 +7634,6 @@ static int igb_resume(struct device *dev)
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- rtnl_unlock();
return -ENOMEM;
}
@@ -7845,11 +7904,13 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
struct e1000_hw *hw = &adapter->hw;
u32 rar_low, rar_high;
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to CPU endian
+ /* HW expects these to be in network order when they are plugged
+ * into the registers, which are little endian, so do an leXX_to_cpup
+ * here to have the value ready for the byte swap that writel performs
*/
- rar_low = le32_to_cpup((__be32 *)(addr));
- rar_high = le16_to_cpup((__be16 *)(addr + 4));
+ rar_low = le32_to_cpup((__le32 *)(addr));
+ rar_high = le16_to_cpup((__le16 *)(addr + 4));
/* Indicate to hardware the Address is Valid. */
rar_high |= E1000_RAH_AV;
@@ -7921,7 +7982,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
/* Calculate the rate factor values to set */
rf_int = link_speed / tx_rate;
rf_dec = (link_speed - (rf_int * tx_rate));
- rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
+ rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
tx_rate;
bcnrc_val = E1000_RTTBCNRC_RS_ENA;
@@ -8011,11 +8072,11 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
reg_val = rd32(reg_offset);
if (setting)
- reg_val |= ((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ reg_val |= (BIT(vf) |
+ BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
else
- reg_val &= ~((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ reg_val &= ~(BIT(vf) |
+ BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
wr32(reg_offset, reg_val);
adapter->vf_data[vf].spoofchk_enabled = setting;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 22a8a2989..f097c5a8a 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -69,9 +69,9 @@
#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
-#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
-#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
-#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
+#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
+#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
@@ -722,11 +722,29 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
+ int adjust = 0;
regval = rd32(E1000_TXSTMPL);
regval |= (u64)rd32(E1000_TXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ /* adjust timestamp for the TX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_TX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_TX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_TX_LATENCY_1000;
+ break;
+ }
+ }
+
+ shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
@@ -771,6 +789,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
+ int adjust = 0;
/* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -790,6 +809,23 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+ /* adjust timestamp for the RX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_RX_LATENCY_1000;
+ break;
+ }
+ }
+ skb_hwtstamps(skb)->hwtstamp =
+ ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
/* Update the last_rx_timestamp timer in order to enable watchdog check
* for error case of latched timestamp on a dropped packet.
*/
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index ae3f28332..ee1ef08d7 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -113,7 +113,7 @@
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Que */
/* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index b74ce53d7..8dea1b136 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -154,7 +154,8 @@ static void igbvf_get_regs(struct net_device *netdev,
memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ regs->version = (1u << 24) |
+ (adapter->pdev->revision << 16) |
adapter->pdev->device;
regs_buff[0] = er32(CTRL);
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index f166baab8..6f4290d6d 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -287,8 +287,8 @@ struct igbvf_info {
};
/* hardware capability, feature, and workaround flags */
-#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
-#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1)
+#define IGBVF_FLAG_RX_CSUM_DISABLED BIT(0)
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP BIT(1)
#define IGBVF_RX_DESC_ADV(R, i) \
(&((((R).desc))[i].rx_desc))
#define IGBVF_TX_DESC_ADV(R, i) \
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index c12442252..b0778ba65 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -964,7 +964,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
ivar = ivar & 0xFFFFFF00;
ivar |= msix_vector | E1000_IVAR_VALID;
}
- adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
+ adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
array_ew32(IVAR0, index, ivar);
}
if (tx_queue > IGBVF_NO_QUEUE) {
@@ -979,7 +979,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
ivar = ivar & 0xFFFF00FF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
}
- adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
+ adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
array_ew32(IVAR0, index, ivar);
}
}
@@ -1014,8 +1014,8 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter)
ew32(IVAR_MISC, tmp);
- adapter->eims_enable_mask = (1 << (vector)) - 1;
- adapter->eims_other = 1 << (vector - 1);
+ adapter->eims_enable_mask = GENMASK(vector - 1, 0);
+ adapter->eims_other = BIT(vector - 1);
e1e_flush();
}
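GENMASK(h, l) builds a mask with bits h down through l set, so GENMASK(vector - 1, 0) covers all configured MSI-X vectors; unlike the old `(1 << vector) - 1` form it stays well-defined even if vector reaches the register width. For example:

    mask = GENMASK(3, 0);                               /* 0x0000000f */
    adapter->eims_enable_mask = GENMASK(vector - 1, 0); /* vectors 0..vector-1 */
    adapter->eims_other = BIT(vector - 1);              /* last vector = "other" */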
@@ -1367,7 +1367,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct igbvf_ring *rx_ring = adapter->rx_ring;
u64 rdba;
- u32 rdlen, rxdctl;
+ u32 rxdctl;
/* disable receives */
rxdctl = er32(RXDCTL(0));
@@ -1375,8 +1375,6 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
e1e_flush();
msleep(10);
- rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
@@ -1933,83 +1931,74 @@ static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
buffer_info->dma = 0;
}
-static int igbvf_tso(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
- __be16 protocol)
-{
- struct e1000_adv_tx_context_desc *context_desc;
- struct igbvf_buffer *buffer_info;
- u32 info = 0, tu_cmd = 0;
- u32 mss_l4len_idx, l4len;
- unsigned int i;
+static int igbvf_tso(struct igbvf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
- *hdr_len = 0;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
err = skb_cow_head(skb, 0);
- if (err < 0) {
- dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
+ if (err < 0)
return err;
- }
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
-
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- }
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
- i = tx_ring->next_to_use;
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IGBVF_TX_FLAGS_VLAN)
- info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
- info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- info |= (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(info);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+ ip.v4->tot_len = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
- if (protocol == htons(ETH_P_IP))
- tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
- context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
- /* MSS L4LEN IDX */
- mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
- context_desc->seqnum_seed = 0;
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
- buffer_info->time_stamp = jiffies;
- buffer_info->dma = 0;
- i++;
- if (i == tx_ring->count)
- i = 0;
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
- tx_ring->next_to_use = i;
+ igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
- return true;
+ return 1;
}
static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
@@ -2091,7 +2080,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
}
#define IGBVF_MAX_TXD_PWR 16
-#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
+#define IGBVF_MAX_DATA_PER_TXD (1u << IGBVF_MAX_TXD_PWR)
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
@@ -2271,8 +2260,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
first = tx_ring->next_to_use;
- tso = skb_is_gso(skb) ?
- igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
+ tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
if (unlikely(tso < 0)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2615,6 +2603,40 @@ static int igbvf_set_features(struct net_device *netdev,
return 0;
}
+#define IGBVF_MAX_MAC_HDR_LEN 127
+#define IGBVF_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
static const struct net_device_ops igbvf_netdev_ops = {
.ndo_open = igbvf_open,
.ndo_stop = igbvf_close,
@@ -2631,7 +2653,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_poll_controller = igbvf_netpoll,
#endif
.ndo_set_features = igbvf_set_features,
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = igbvf_features_check,
};
/**
@@ -2739,22 +2761,30 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+ IGBVF_GSO_PARTIAL_FEATURES;
+
+ netdev->features = netdev->hw_features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_HW_CSUM |
- NETIF_F_SCTP_CRC;
-
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
+
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
/* reset the controller to put the device in a known good state */
err = hw->mac.ops.reset_hw(hw);
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index a13baa90a..335ba6642 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -266,7 +266,7 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
msgbuf[1] = vid;
/* Setting the 8 bit field MSG INFO to true indicates "add" */
if (set)
- msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= BIT(E1000_VT_MSGINFO_SHIFT);
mbx->ops.write_posted(hw, msgbuf, 2);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e4949af7d..9f2db1855 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -143,14 +143,11 @@ struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
- u16 default_vf_vlan_id;
- u16 vlans_enabled;
bool clear_to_send;
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
- u16 vlan_count;
u8 spoofchk_enabled;
bool rss_query_enabled;
u8 trusted;
@@ -173,7 +170,7 @@ struct vf_macvlans {
};
#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
@@ -456,7 +453,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
IXGBE_QV_STATE_POLL);
#ifdef BP_EXTENDED_STATS
if (rc != IXGBE_QV_STATE_IDLE)
- q_vector->tx.ring->stats.yields++;
+ q_vector->rx.ring->stats.yields++;
#endif
return rc == IXGBE_QV_STATE_IDLE;
}
@@ -623,44 +620,45 @@ struct ixgbe_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12)
-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16)
-#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
+#define IXGBE_FLAG_MSI_ENABLED BIT(1)
+#define IXGBE_FLAG_MSIX_ENABLED BIT(3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4)
+#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5)
+#define IXGBE_FLAG_RX_PS_ENABLED BIT(6)
+#define IXGBE_FLAG_DCA_ENABLED BIT(8)
+#define IXGBE_FLAG_DCA_CAPABLE BIT(9)
+#define IXGBE_FLAG_IMIR_ENABLED BIT(10)
+#define IXGBE_FLAG_MQ_CAPABLE BIT(11)
+#define IXGBE_FLAG_DCB_ENABLED BIT(12)
+#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13)
+#define IXGBE_FLAG_VMDQ_ENABLED BIT(14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19)
+#define IXGBE_FLAG_FCOE_CAPABLE BIT(20)
+#define IXGBE_FLAG_FCOE_ENABLED BIT(21)
+#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22)
+#define IXGBE_FLAG_SRIOV_ENABLED BIT(23)
#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26)
+#define IXGBE_FLAG_DCB_CAPABLE BIT(27)
u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
-#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
-#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
-#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
-#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
+#define IXGBE_FLAG2_RSC_CAPABLE BIT(0)
+#define IXGBE_FLAG2_RSC_ENABLED BIT(1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5)
+#define IXGBE_FLAG2_RESET_REQUESTED BIT(6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
+#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
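/*
 * For reference: BIT(nr) from <linux/bitops.h> expands to (1UL << (nr)), so
 * e.g. IXGBE_FLAG_SRIOV_ENABLED == BIT(23) == 0x00800000.  Unlike the old
 * (u32)(1 << n) form it never left-shifts a signed 1 into the sign bit,
 * which is undefined behaviour for n == 31.
 */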
@@ -795,7 +793,7 @@ struct ixgbe_adapter {
unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
#define IXGBE_MAX_LINK_HANDLE 10
- struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE];
+ struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
unsigned long tables;
/* maximum number of RETA entries among all devices supported by ixgbe
@@ -806,6 +804,8 @@ struct ixgbe_adapter {
#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */
u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
+
+ bool need_crosstalk_fix;
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
@@ -817,6 +817,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
return IXGBE_MAX_RSS_INDICES;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
@@ -827,7 +828,7 @@ struct ixgbe_fdir_filter {
struct hlist_node fdir_node;
union ixgbe_atr_input filter;
u16 sw_idx;
- u16 action;
+ u64 action;
};
enum ixgbe_state_t {
@@ -860,13 +861,15 @@ enum ixgbe_boards {
board_X540,
board_X550,
board_X550EM_x,
+ board_x550em_a,
};
-extern struct ixgbe_info ixgbe_82598_info;
-extern struct ixgbe_info ixgbe_82599_info;
-extern struct ixgbe_info ixgbe_X540_info;
-extern struct ixgbe_info ixgbe_X550_info;
-extern struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_82598_info;
+extern const struct ixgbe_info ixgbe_82599_info;
+extern const struct ixgbe_info ixgbe_X540_info;
+extern const struct ixgbe_info ixgbe_X550_info;
+extern const struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_x550em_a_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
#endif
@@ -893,8 +896,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
- u16 subdevice_id);
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+ u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index d8a9fb8a5..fb51be74d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -792,7 +792,7 @@ mac_reset_top:
}
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
- gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+ gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
/*
@@ -914,10 +914,10 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
if (vlan_on)
/* Turn on this VLAN id */
- bits |= (1 << bitindex);
+ bits |= BIT(bitindex);
else
/* Turn off this VLAN id */
- bits &= ~(1 << bitindex);
+ bits &= ~BIT(bitindex);
IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
return 0;
@@ -1160,7 +1160,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
}
-static struct ixgbe_mac_operations mac_ops_82598 = {
+static const struct ixgbe_mac_operations mac_ops_82598 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82598,
.start_hw = &ixgbe_start_hw_82598,
@@ -1192,9 +1192,11 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.clear_vfta = &ixgbe_clear_vfta_82598,
.set_vfta = &ixgbe_set_vfta_82598,
.fc_enable = &ixgbe_fc_enable_82598,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
+ .init_swfw_sync = NULL,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
.prot_autoc_read = &prot_autoc_read_generic,
@@ -1203,7 +1205,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
.write = &ixgbe_write_eeprom_generic,
@@ -1214,7 +1216,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
-static struct ixgbe_phy_operations phy_ops_82598 = {
+static const struct ixgbe_phy_operations phy_ops_82598 = {
.identify = &ixgbe_identify_phy_generic,
.identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82598,
@@ -1230,7 +1232,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.check_overtemp = &ixgbe_tn_check_overtemp,
};
-struct ixgbe_info ixgbe_82598_info = {
+const struct ixgbe_info ixgbe_82598_info = {
.mac = ixgbe_mac_82598EB,
.get_invariants = &ixgbe_get_invariants_82598,
.mac_ops = &mac_ops_82598,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index fa8d4f40a..47afed74a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -1296,17 +1296,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
u32 n = (_n); \
- if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
common_hash ^= lo_hash_dword >> n; \
- else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
bucket_hash ^= lo_hash_dword >> n; \
- else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
sig_hash ^= lo_hash_dword << (16 - n); \
- if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
common_hash ^= hi_hash_dword >> n; \
- else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
bucket_hash ^= hi_hash_dword >> n; \
- else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
@@ -1440,9 +1440,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
u32 n = (_n); \
- if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
bucket_hash ^= lo_hash_dword >> n; \
- if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
bucket_hash ^= hi_hash_dword >> n; \
} while (0)
@@ -1633,6 +1633,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
switch (hw->mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
break;
default:
@@ -2181,7 +2182,7 @@ release_i2c_access:
return status;
}
-static struct ixgbe_mac_operations mac_ops_82599 = {
+static const struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
.start_hw = &ixgbe_start_hw_82599,
@@ -2220,6 +2221,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
@@ -2227,6 +2229,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
+ .init_swfw_sync = NULL,
.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
.prot_autoc_read = &prot_autoc_read_82599,
@@ -2235,7 +2238,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eeprom_82599,
.read_buffer = &ixgbe_read_eeprom_buffer_82599,
@@ -2246,7 +2249,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
-static struct ixgbe_phy_operations phy_ops_82599 = {
+static const struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
.identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82599,
@@ -2263,7 +2266,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
.check_overtemp = &ixgbe_tn_check_overtemp,
};
-struct ixgbe_info ixgbe_82599_info = {
+const struct ixgbe_info ixgbe_82599_info = {
.mac = ixgbe_mac_82599EB,
.get_invariants = &ixgbe_get_invariants_82599,
.mac_ops = &mac_ops_82599,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 64045053e..902d2061c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -97,6 +97,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
supported = true;
break;
@@ -111,12 +112,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_fc - Set up flow control
+ * ixgbe_setup_fc_generic - Set up flow control
* @hw: pointer to hardware structure
*
* Called at init time to set up flow control.
**/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
@@ -296,7 +297,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ret_val = ixgbe_setup_fc(hw);
+ ret_val = hw->mac.ops.setup_fc(hw);
if (ret_val)
return ret_val;
@@ -681,6 +682,7 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
struct ixgbe_bus_info *bus = &hw->bus;
+ u16 ee_ctrl_4;
u32 reg;
reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
@@ -691,6 +693,13 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
+
+ /* Get MAC instance from EEPROM for configuring CS4227 */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
+ bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
+ IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ }
}
/**
@@ -816,8 +825,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
*/
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -1493,7 +1502,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
* Mask is used to shift "count" bits of "data" out to the EEPROM
* one bit at a time. Determine the starting bit based on count
*/
- mask = 0x01 << (count - 1);
+ mask = BIT(count - 1);
for (i = 0; i < count; i++) {
/*
@@ -1982,7 +1991,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
*/
vector_reg = (vector >> 5) & 0x7F;
vector_bit = vector & 0x1F;
- hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+ hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
}
/**
@@ -2854,6 +2863,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
@@ -2911,10 +2921,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
mpsar_hi = 0;
}
} else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
+ mpsar_lo &= ~BIT(vmdq);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
} else {
- mpsar_hi &= ~(1 << (vmdq - 32));
+ mpsar_hi &= ~BIT(vmdq - 32);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
}
@@ -2943,11 +2953,11 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
if (vmdq < 32) {
mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
+ mpsar |= BIT(vmdq);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
} else {
mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
+ mpsar |= BIT(vmdq - 32);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
}
return 0;
@@ -2968,11 +2978,11 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
u32 rar = hw->mac.san_mac_rar_index;
if (vmdq < 32) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
} else {
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
}
return 0;
@@ -3072,7 +3082,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* bits[4-0]: which bit in the register
*/
regidx = vlan / 32;
- vfta_delta = 1 << (vlan % 32);
+ vfta_delta = BIT(vlan % 32);
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
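/*
 * Worked example (illustration only): for vlan == 100, regidx ==
 * 100 / 32 == 3 and vfta_delta == BIT(100 % 32) == BIT(4) == 0x10,
 * i.e. bit 4 of the fourth of the 128 32-bit VFTA registers that
 * together cover the 4096 possible VLAN IDs.
 */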
/* vfta_delta represents the difference between the current value
@@ -3103,12 +3113,12 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
/* set the pool bit */
- bits |= 1 << (vind % 32);
+ bits |= BIT(vind % 32);
if (vlan_on)
goto vlvf_update;
/* clear the pool bit */
- bits ^= 1 << (vind % 32);
+ bits ^= BIT(vind % 32);
if (!bits &&
!IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
@@ -3300,43 +3310,25 @@ wwn_prefix_err:
/**
* ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
* @hw: pointer to hardware structure
- * @enable: enable or disable switch for anti-spoofing
- * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ * @enable: enable or disable switch for MAC anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
*
**/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
- int j;
- int pf_target_reg = pf >> 3;
- int pf_target_shift = pf % 8;
- u32 pfvfspoof = 0;
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8;
+ u32 pfvfspoof;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
- /*
- * PFVFSPOOF register array is size 8 with 8 bits assigned to
- * MAC anti-spoof enables in each register array element.
- */
- for (j = 0; j < pf_target_reg; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Do not set the bits assigned to the PF
- */
- pfvfspoof &= (1 << pf_target_shift) - 1;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * Remaining pools belong to the PF so they do not need to have
- * anti-spoofing enabled.
- */
- for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+ pfvfspoof |= BIT(vf_target_shift);
+ else
+ pfvfspoof &= ~BIT(vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
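/*
 * Illustrative note: each PFVFSPOOF register carries the MAC anti-spoof
 * enables for 8 pools in its low byte, so e.g. vf == 10 maps to bit
 * 10 % 8 == 2 of PFVFSPOOF(10 >> 3) == PFVFSPOOF(1).  The read/modify/write
 * toggles only that VF's bit; the old loop rewrote every register and could
 * not enable spoof checking for a single VF.
 */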
/**
@@ -3357,9 +3349,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof |= (1 << vf_target_shift);
+ pfvfspoof |= BIT(vf_target_shift);
else
- pfvfspoof &= ~(1 << vf_target_shift);
+ pfvfspoof &= ~BIT(vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
@@ -3483,18 +3475,27 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* Communicates with the manageability block. On success return 0
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
bool return_data)
{
- u32 hicr, i, bi, fwsts;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u32 hicr, i, bi, fwsts;
u16 buf_len, dword_len;
+ union {
+ struct ixgbe_hic_hdr hdr;
+ u32 u32arr[1];
+ } *bp = buffer;
+ s32 status;
- if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+ if (status)
+ return status;
/* Set bit 9 of FWSTS clearing FW reset indication */
fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
@@ -3502,26 +3503,27 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Check that the host interface is enabled. */
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
- if ((hicr & IXGBE_HICR_EN) == 0) {
+ if (!(hicr & IXGBE_HICR_EN)) {
hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
- if ((length % (sizeof(u32))) != 0) {
+ if (length % sizeof(u32)) {
hw_dbg(hw, "Buffer length failure, not aligned to dword");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto rel_out;
}
dword_len = length >> 2;
- /*
- * The device driver writes the relevant command block
+ /* The device driver writes the relevant command block
* into the ram area.
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, cpu_to_le32(buffer[i]));
+ i, cpu_to_le32(bp->u32arr[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3534,44 +3536,49 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
}
/* Check command successful completion. */
- if ((timeout != 0 && i == timeout) ||
- (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+ if ((timeout && i == timeout) ||
+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
hw_dbg(hw, "Command has failed with no status valid.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
if (!return_data)
- return 0;
+ goto rel_out;
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
/* first pull in the header so we know the buffer length */
for (bi = 0; bi < dword_len; bi++) {
- buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&buffer[bi]);
+ bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&bp->u32arr[bi]);
}
/* If there is anything in the data position, pull it in */
- buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
- if (buf_len == 0)
- return 0;
+ buf_len = bp->hdr.buf_len;
+ if (!buf_len)
+ goto rel_out;
- if (length < (buf_len + hdr_size)) {
+ if (length < round_up(buf_len, 4) + hdr_size) {
hw_dbg(hw, "Buffer not large enough for reply message.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs, add 3 for odd lengths */
dword_len = (buf_len + 3) >> 2;
- /* Pull in the rest of the buffer (bi is where we left off)*/
+ /* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
- buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&buffer[bi]);
+ bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&bp->u32arr[bi]);
}
- return 0;
+rel_out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+ return status;
}
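/*
 * Note on the hunk above: the local union lets one buffer be viewed both as
 * the command header (bp->hdr.buf_len) and as the DWORD stream copied through
 * the IXGBE_FLEX_MNG window, the SW_MNG semaphore is now held for the whole
 * transaction and dropped at rel_out, and the round_up(buf_len, 4) check
 * keeps the DWORD copy loop from overrunning the caller's buffer on
 * odd-length replies.
 */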
/**
@@ -3594,13 +3601,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
int i;
s32 ret_val;
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM))
- return IXGBE_ERR_SWFW_SYNC;
-
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
- fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.port_num = hw->bus.func;
fw_cmd.ver_maj = maj;
fw_cmd.ver_min = min;
fw_cmd.ver_build = build;
@@ -3612,7 +3616,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.pad2 = 0;
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
sizeof(fw_cmd),
IXGBE_HI_COMMAND_TIMEOUT,
true);
@@ -3628,7 +3632,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
break;
}
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2b9563137..6d4c260d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -81,6 +81,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
@@ -105,13 +106,13 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length, u32 timeout, bool return_data);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
+ u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 02c7333a9..072ef3b5f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -186,7 +186,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
if (tc_config[tc].dcb_pfc != pfc_disabled)
- *pfc_en |= 1 << tc;
+ *pfc_en |= BIT(tc);
}
}
@@ -232,7 +232,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
{
struct tc_configuration *tc_config = &cfg->tc_config[0];
- u8 prio_mask = 1 << up;
+ u8 prio_mask = BIT(up);
u8 tc = cfg->num_tcs.pg_tcs;
/* If tc is 0 then DCB is likely not enabled or supported */
@@ -293,6 +293,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
bwgid, ptype, prio_tc);
default:
@@ -311,6 +312,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
default:
break;
@@ -368,6 +370,7 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
bwg_id, prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
@@ -398,6 +401,7 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_dcb_read_rtrup2tc_82599(hw, map);
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index d3ba63f9a..b79e93a5b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -210,7 +210,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- if (!(pfc_en & (1 << i))) {
+ if (!(pfc_en & BIT(i))) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
continue;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index b5cc989a3..1011d6449 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -248,7 +248,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
int enabled = 0;
for (j = 0; j < MAX_USER_PRIORITY; j++) {
- if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+ if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
enabled = 1;
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 2707bda37..b8fc3cfec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -62,7 +62,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
};
u8 up = dcb_getapp(adapter->netdev, &app);
- if (up && !(up & (1 << adapter->fcoe.up)))
+ if (up && !(up & BIT(adapter->fcoe.up)))
changes |= BIT_APP_UPCHG;
#endif
@@ -657,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
- if (app_mask & (1 << adapter->fcoe.up))
+ if (app_mask & BIT(adapter->fcoe.up))
return 0;
adapter->fcoe.up = app->priority;
@@ -700,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
- if (app_mask & (1 << adapter->fcoe.up))
+ if (app_mask & BIT(adapter->fcoe.up))
return 0;
adapter->fcoe.up = app_mask ?
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index b3530e1e3..59b771b9b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -533,10 +533,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* Flow Control */
regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
- regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
- regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
- regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
- regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
+ for (i = 0; i < 4; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
for (i = 0; i < 8; i++) {
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -547,6 +545,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -660,6 +659,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
for (i = 0; i < 8; i++)
@@ -718,8 +718,10 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
- regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
- regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
+ regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
+ regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
+ regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
+ regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
for (i = 0; i < 8; i++)
regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
@@ -729,7 +731,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
- regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
+ regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
+ regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
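/*
 * Illustrative note: gorc, gotc and tor are 64-bit counters in
 * ixgbe_hw_stats while regs_buff[] holds u32 words, so each counter now
 * occupies two consecutive entries (low word at 942/944/961, high word at
 * 943/945/962) instead of being truncated to 32 bits with the odd slots
 * left unwritten.
 */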
@@ -801,15 +804,11 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
- regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
- regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
- regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
- regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
+ for (i = 0; i < 4; i++)
+ regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
- regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
- regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
- regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
- regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+ for (i = 0; i < 4; i++)
+ regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
for (i = 0; i < 8; i++)
regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
@@ -1443,6 +1442,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1583,7 +1583,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Test each interrupt */
for (; i < 10; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (!shared_int) {
/*
@@ -1681,6 +1681,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
@@ -1720,6 +1721,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1780,6 +1782,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
@@ -2991,6 +2994,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -3007,14 +3011,14 @@ static int ixgbe_get_ts_info(struct net_device *dev,
info->phc_index = -1;
info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
default:
return ethtool_op_get_ts_info(dev, info);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index e771e764d..bcdc88444 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -128,6 +128,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7df3fe29b..8bebd862a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -53,15 +53,7 @@
#include <net/vxlan.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
-
-#ifdef CONFIG_OF
-#include <linux/of_net.h>
-#endif
-
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
+#include <net/tc_act/tc_mirred.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -79,10 +71,10 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "4.2.1-k"
+#define DRV_VERSION "4.4.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2015 Intel Corporation.";
+ "Copyright (c) 1999-2016 Intel Corporation.";
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has overheated. Restart the computer. If the problem persists, power off the system and replace the adapter";
@@ -92,6 +84,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_X540] = &ixgbe_X540_info,
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
+ [board_x550em_a] = &ixgbe_x550em_a_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -134,10 +127,17 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
/* required last entry */
{0, }
};
@@ -372,6 +372,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
if (ixgbe_removed(reg_addr))
return IXGBE_FAILED_READ_REG;
+ if (unlikely(hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
+ struct ixgbe_adapter *adapter;
+ int i;
+
+ for (i = 0; i < 200; ++i) {
+ value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
+ if (likely(!value))
+ goto writes_completed;
+ if (value == IXGBE_FAILED_READ_REG) {
+ ixgbe_remove_adapter(hw);
+ return IXGBE_FAILED_READ_REG;
+ }
+ udelay(5);
+ }
+
+ adapter = hw->back;
+ e_warn(hw, "register writes incomplete %08x\n", value);
+ }
+
+writes_completed:
value = readl(reg_addr + reg);
if (unlikely(value == IXGBE_FAILED_READ_REG))
ixgbe_check_remove(hw, reg);
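/*
 * Illustrative note: on parts where nw_mng_if_sel selects the 10/100M
 * (SGMII) interface, posted register writes can still be in flight, so the
 * read path first polls IXGBE_MAC_SGMII_BUSY for up to 200 * 5 us (about
 * 1 ms).  A busy value of all ones means the adapter vanished from the bus
 * and is treated as a surprise removal.
 */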
@@ -588,7 +609,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
pr_info("%-15s %016lX %016lX %016lX\n",
netdev->name,
netdev->state,
- netdev->trans_start,
+ dev_trans_start(netdev),
netdev->last_rx);
}
@@ -869,6 +890,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -907,6 +929,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -1087,9 +1110,40 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ * @netdev: network interface device structure
+ * @queue_index: Tx queue to set the bitrate on
+ * @maxrate: desired maximum transmit bitrate in Mbps
+ **/
+static int ixgbe_tx_maxrate(struct net_device *netdev,
+ int queue_index, u32 maxrate)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 bcnrc_val = ixgbe_link_mbps(adapter);
+
+ if (!maxrate)
+ return 0;
+
+ /* Calculate the rate factor values to set */
+ bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+ bcnrc_val /= maxrate;
+
+ /* clear everything but the rate factor */
+ bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+ IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+ /* enable the rate scheduler */
+ bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+
+ return 0;
+}
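+/*
+ * Worked example, assuming the usual 10.14 fixed-point layout of RTTBCNRC
+ * (IXGBE_RTTBCNRC_RF_INT_SHIFT == 14): on a 10000 Mbps link with
+ * maxrate == 2500, bcnrc_val == (10000 << 14) / 2500 == 65536, i.e. a rate
+ * factor of 4.0, giving the queue one quarter of the line rate.
+ */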
+
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring, int napi_budget)
@@ -2192,7 +2246,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* Populate MSIX to EITR Select */
if (adapter->num_vfs > 32) {
- u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
}
@@ -2222,6 +2276,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2333,6 +2388,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2494,6 +2550,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
return false;
case ixgbe_mac_82599EB:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
case ixgbe_media_type_fiber_qsfp:
@@ -2568,6 +2625,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2596,6 +2654,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2631,6 +2690,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask |= IXGBE_EIMS_TS;
break;
default:
@@ -2646,7 +2706,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
- if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ case ixgbe_mac_x550em_a:
+ if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+ adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
+ adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
mask |= IXGBE_EICR_GPI_SDP0_X540;
@@ -2704,6 +2767,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -2786,8 +2850,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector);
#endif
- ixgbe_for_each_ring(ring, q_vector->tx)
- clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget);
+ ixgbe_for_each_ring(ring, q_vector->tx) {
+ if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
/* Exit if we are called by netpoll or busy polling is active */
if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
@@ -2805,7 +2871,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget);
work_done += cleaned;
- clean_complete &= (cleaned < per_ring_budget);
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
}
ixgbe_qv_unlock_napi(q_vector);
@@ -2818,9 +2885,9 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
- return 0;
+ return min(work_done, budget - 1);
}
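/*
 * For reference: the NAPI contract is that a poll which completes must
 * return strictly less than its budget.  Since this path re-enables the
 * queue interrupts, it clamps to min(work_done, budget - 1) so the core
 * never mistakes it for an incomplete poll.
 */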
/**
@@ -2937,6 +3004,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -3033,6 +3101,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3109,15 +3178,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
* currently 40.
*/
if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
- txdctl |= (1 << 16); /* WTHRESH = 1 */
+ txdctl |= 1u << 16; /* WTHRESH = 1 */
else
- txdctl |= (8 << 16); /* WTHRESH = 8 */
+ txdctl |= 8u << 16; /* WTHRESH = 8 */
/*
* Setting PTHRESH to 32 both improves performance
* and avoids a TX hang with DFP enabled
*/
- txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ txdctl |= (1u << 8) | /* HTHRESH = 1 */
32; /* PTHRESH = 32 */
/* reinitialize flowdirector state */
@@ -3669,9 +3738,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
return;
if (rss_i > 3)
- psrtype |= 2 << 29;
+ psrtype |= 2u << 29;
else if (rss_i > 1)
- psrtype |= 1 << 29;
+ psrtype |= 1u << 29;
for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
@@ -3698,9 +3767,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
if (adapter->bridge_mode == BRIDGE_MODE_VEB)
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
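/*
 * For reference: GENMASK(31, vf_shift) sets bits 31..vf_shift, e.g.
 * vf_shift == 8 gives 0xFFFFFF00.  It matches what (~0) << vf_shift was
 * meant to produce, without left-shifting the negative constant ~0, which
 * is undefined behaviour in C.
 */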
@@ -3729,34 +3798,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
-
- /* Enable MAC Anti-Spoofing */
- hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
- adapter->num_vfs);
-
- /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
- * calling set_ethertype_anti_spoofing for each VF in loop below
- */
- if (hw->mac.ops.set_ethertype_anti_spoofing) {
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
- (IXGBE_ETQF_FILTER_EN |
- IXGBE_ETQF_TX_ANTISPOOF |
- IXGBE_ETH_P_LLDP));
-
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
- (IXGBE_ETQF_FILTER_EN |
- IXGBE_ETQF_TX_ANTISPOOF |
- ETH_P_PAUSE));
- }
-
- /* For VFs that have spoof checking turned off */
for (i = 0; i < adapter->num_vfs; i++) {
- if (!adapter->vfinfo[i].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
-
- /* enable ethertype anti spoofing if hw supports it */
- if (hw->mac.ops.set_ethertype_anti_spoofing)
- hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+ /* configure spoof checking */
+ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
+ adapter->vfinfo[i].spoofchk_enabled);
/* Enable/Disable RSS query feature */
ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
@@ -3832,6 +3877,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
/* fall through for older HW */
@@ -3908,7 +3954,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
+ if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
+
set_bit(vid, adapter->active_vlans);
return 0;
@@ -3947,7 +3995,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
* entry other than the PF.
*/
word = idx * 2 + (VMDQ_P(0) / 32);
- bits = ~(1 << (VMDQ_P(0)) % 32);
+ bits = ~BIT(VMDQ_P(0) % 32);
bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
/* Disable the filter so this falls into the default pool. */
@@ -3965,9 +4013,7 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
/* remove VID from filter table */
- if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
- ixgbe_update_pf_promisc_vlvf(adapter, vid);
- else
+ if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
clear_bit(vid, adapter->active_vlans);
@@ -3995,6 +4041,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4031,6 +4078,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4057,6 +4105,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
break;
@@ -4081,7 +4130,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
- vlvfb |= 1 << (VMDQ_P(0) % 32);
+ vlvfb |= BIT(VMDQ_P(0) % 32);
IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
}
@@ -4111,7 +4160,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
if (vlvf) {
/* record VLAN ID in VFTA */
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
/* if PF is part of this then continue */
if (test_bit(vid, adapter->active_vlans))
@@ -4120,7 +4169,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
/* remove PF from the pool */
word = i * 2 + VMDQ_P(0) / 32;
- bits = ~(1 << (VMDQ_P(0) % 32));
+ bits = ~BIT(VMDQ_P(0) % 32);
bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
}
@@ -4147,6 +4196,7 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
break;
@@ -4172,11 +4222,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
- u16 vid;
+ u16 vid = 1;
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
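/*
 * Illustrative note: VID 0 is (re)added unconditionally so priority-tagged
 * frames keep flowing, then for_each_set_bit_from() starts the scan at
 * vid == 1 so that bit 0 of active_vlans is not replayed twice.
 */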
@@ -4426,6 +4476,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ netdev_features_t features = netdev->features;
int count;
/* Check for Promiscuous and All Multicast modes */
@@ -4443,14 +4494,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
vmolr |= IXGBE_VMOLR_MPE;
- ixgbe_vlan_promisc_enable(adapter);
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
hw->addr_ctrl.user_set_promisc = false;
- ixgbe_vlan_promisc_disable(adapter);
}
/*
@@ -4483,7 +4533,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
}
/* This is useful for sniffing bad packets. */
- if (adapter->netdev->features & NETIF_F_RXALL) {
+ if (features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
* in e1000e_set_rx_mode */
fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
@@ -4496,10 +4546,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
ixgbe_vlan_strip_enable(adapter);
else
ixgbe_vlan_strip_disable(adapter);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ ixgbe_vlan_promisc_disable(adapter);
+ else
+ ixgbe_vlan_promisc_enable(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -4530,6 +4585,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
adapter->vxlan_port = 0;
break;
@@ -4630,6 +4686,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -4690,6 +4747,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -4805,9 +4863,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
return;
if (rss_i > 3)
- psrtype |= 2 << 29;
+ psrtype |= 2u << 29;
else if (rss_i > 1)
- psrtype |= 1 << 29;
+ psrtype |= 1u << 29;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
@@ -4871,7 +4929,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
/* shutdown specific queue receive and wait for dma to settle */
ixgbe_disable_rx_queue(adapter, rx_ring);
usleep_range(10000, 20000);
- ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+ ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
ixgbe_clean_rx_ring(rx_ring);
rx_ring->l2_accel_priv = NULL;
}
@@ -5106,6 +5164,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5156,6 +5215,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
gpie |= IXGBE_SDP0_GPIEN_X540;
break;
default:
@@ -5228,7 +5288,7 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
- adapter->netdev->trans_start = jiffies;
+ netif_trans_update(adapter->netdev);
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -5467,6 +5527,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -5498,6 +5559,58 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
ixgbe_tx_timeout_reset(adapter);
}
+#ifdef CONFIG_IXGBE_DCB
+static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct tc_configuration *tc;
+ int j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ case ixgbe_mac_82599EB:
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ default:
+ adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
+ break;
+ }
+
+ /* Configure DCB traffic classes */
+ for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+ tc = &adapter->dcb_cfg.tc_config[j];
+ tc->path[DCB_TX_CONFIG].bwg_id = 0;
+ tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->path[DCB_RX_CONFIG].bwg_id = 0;
+ tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->dcb_pfc = pfc_disabled;
+ }
+
+ /* Initialize the default user-priority-to-TC mapping, UPx->TC0 */
+ tc = &adapter->dcb_cfg.tc_config[0];
+ tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
+ adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+ adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_set_bitmap = 0x00;
+ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+ adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+ memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+ sizeof(adapter->temp_dcb_cfg));
+}
+#endif
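/* Note: the 12 + (j & 1) default in ixgbe_init_dcb() alternates the
 * per-TC bandwidth between 12% and 13%, so the eight
 * MAX_TRAFFIC_CLASS entries total 4 * 12 + 4 * 13 = 100, i.e. the
 * defaults sum to exactly 100% of the bandwidth group.
 */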
+
/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
@@ -5512,10 +5625,8 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
unsigned int rss, fdir;
u32 fwsm;
-#ifdef CONFIG_IXGBE_DCB
- int j;
- struct tc_configuration *tc;
-#endif
+ u16 device_caps;
+ int i;
/* PCI config space info */
@@ -5537,6 +5648,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
+#ifdef CONFIG_IXGBE_DCB
+ adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+#endif
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
@@ -5547,7 +5662,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
/* initialize static ixgbe jump table entries */
- adapter->jump_tables[0] = ixgbe_ipv4_fields;
+ adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
+ GFP_KERNEL);
+ if (!adapter->jump_tables[0])
+ return -ENOMEM;
+ adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
+
+ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
+ adapter->jump_tables[i] = NULL;
adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
hw->mac.num_rar_entries,
@@ -5585,6 +5707,17 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+#ifdef CONFIG_IXGBE_DCB
+ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+#ifdef CONFIG_IXGBE_DCB
+ adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+ /* Fall Through */
case ixgbe_mac_X550:
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
@@ -5606,42 +5739,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
spin_lock_init(&adapter->fdir_perfect_lock);
#ifdef CONFIG_IXGBE_DCB
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
- break;
- default:
- adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
- break;
- }
-
- /* Configure DCB traffic classes */
- for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
- tc = &adapter->dcb_cfg.tc_config[j];
- tc->path[DCB_TX_CONFIG].bwg_id = 0;
- tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
- tc->path[DCB_RX_CONFIG].bwg_id = 0;
- tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
- tc->dcb_pfc = pfc_disabled;
- }
-
- /* Initialize default user to priority mapping, UPx->TC0 */
- tc = &adapter->dcb_cfg.tc_config[0];
- tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
- tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-
- adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
- adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
- adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_set_bitmap = 0x00;
- adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
- memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
- sizeof(adapter->temp_dcb_cfg));
-
+ ixgbe_init_dcb(adapter);
#endif
/* default flow control settings */
@@ -5675,6 +5773,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+ /* Cache bit indicating need for crosstalk fix */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+ adapter->need_crosstalk_fix = false;
+ else
+ adapter->need_crosstalk_fix = true;
+ break;
+ default:
+ adapter->need_crosstalk_fix = false;
+ break;
+ }
+
/* set default work limits */
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
@@ -6217,6 +6331,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -6352,6 +6467,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -6367,7 +6483,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
if ((hw->mac.type == ixgbe_mac_82599EB) ||
(hw->mac.type == ixgbe_mac_X540) ||
(hw->mac.type == ixgbe_mac_X550) ||
- (hw->mac.type == ixgbe_mac_X550EM_x)) {
+ (hw->mac.type == ixgbe_mac_X550EM_x) ||
+ (hw->mac.type == ixgbe_mac_x550em_a)) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -6392,6 +6509,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -6562,7 +6680,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
- eics |= ((u64)1 << i);
+ eics |= BIT_ULL(i);
}
}
@@ -6593,6 +6711,18 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter)
link_up = true;
}
+ /* If the crosstalk fix is enabled, sanity-check the SFP+ cage:
+ * an empty cage means any link indication is spurious, so force
+ * the link down.
+ */
+ if (adapter->need_crosstalk_fix) {
+ u32 sfp_cage_full;
+
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full)
+ link_up = false;
+ }
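/* Note: the crosstalk erratum guarded against here can cause some
 * SFP+ designs to report link while the cage is empty. On affected
 * boards SDP2 appears to read back as a module-present indication,
 * so an empty cage (SDP2 clear) overrides any link-up report. A
 * sketch of the check in isolation, under those assumptions:
 *
 *	bool cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & IXGBE_ESDP_SDP2;
 *
 *	if (adapter->need_crosstalk_fix && ixgbe_is_sfp(hw) && !cage_full)
 *		link_up = false;
 */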
+
if (adapter->ixgbe_ieee_pfc)
pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en);
@@ -6662,6 +6792,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -6938,6 +7069,16 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
s32 err;
+ /* If the crosstalk fix is enabled, verify the SFP+ cage is full */
+ if (adapter->need_crosstalk_fix) {
+ u32 sfp_cage_full;
+
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ if (!sfp_cage_full)
+ return;
+ }
+
/* not searching for SFP so there is nothing to do here */
if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) &&
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
@@ -7122,10 +7263,12 @@ static void ixgbe_service_task(struct work_struct *work)
return;
}
#ifdef CONFIG_IXGBE_VXLAN
+ rtnl_lock();
if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
vxlan_get_rx_port(adapter->netdev);
}
+ rtnl_unlock();
#endif /* CONFIG_IXGBE_VXLAN */
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
@@ -7148,9 +7291,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7163,46 +7315,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM |
IXGBE_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 0 as index for TSO */
- mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
@@ -7211,103 +7369,61 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
return 1;
}
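/* Note on the "remove payload length" step in ixgbe_tso() above:
 * with CHECKSUM_PARTIAL the stack seeds tcp->check with a
 * pseudo-header sum that includes the TCP length, while the TSO
 * engine wants that seed computed over a zero length (it rebuilds
 * the length per segment). csum_replace_by_diff() cancels the
 * length back out in ones'-complement arithmetic. A standalone
 * userspace sketch of the same arithmetic, with made-up values:
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t fold32(uint32_t sum)
{
	/* fold a 32-bit ones'-complement accumulator to 16 bits */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint32_t pseudo_no_len = 0x1a2b3c;	/* addresses + protocol */
	uint32_t paylen = 1400;			/* TCP length component */
	uint16_t seeded = fold32(pseudo_no_len + paylen);

	/* complement, add the value to cancel, fold, complement back;
	 * this mirrors csum_fold(csum_add(diff, ~csum_unfold(*sum)))
	 */
	uint16_t fixed = (uint16_t)~fold32((uint32_t)(uint16_t)~seeded + paylen);

	printf("seeded %#x -> fixed %#x (expect %#x)\n",
	       seeded, fixed, fold32(pseudo_no_len));
	return 0;
}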
+static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+ unsigned int offset = 0;
+
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+ return offset == skb_checksum_start_offset(skb);
+}
+
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
- !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+csum_failed:
+ if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
+ IXGBE_TX_FLAGS_CC)))
return;
- vlan_macip_lens = skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- } else {
- u8 l4_hdr = 0;
- union {
- struct iphdr *ipv4;
- struct ipv6hdr *ipv6;
- u8 *raw;
- } network_hdr;
- union {
- struct tcphdr *tcphdr;
- u8 *raw;
- } transport_hdr;
- __be16 frag_off;
-
- if (skb->encapsulation) {
- network_hdr.raw = skb_inner_network_header(skb);
- transport_hdr.raw = skb_inner_transport_header(skb);
- vlan_macip_lens = skb_inner_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- } else {
- network_hdr.raw = skb_network_header(skb);
- transport_hdr.raw = skb_transport_header(skb);
- vlan_macip_lens = skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- }
-
- /* use first 4 bits to determine IP version */
- switch (network_hdr.ipv4->version) {
- case IPVERSION:
- vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
- type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
- l4_hdr = network_hdr.ipv4->protocol;
- break;
- case 6:
- vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
- l4_hdr = network_hdr.ipv6->nexthdr;
- if (likely((transport_hdr.raw - network_hdr.raw) ==
- sizeof(struct ipv6hdr)))
- break;
- ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
- sizeof(struct ipv6hdr),
- &l4_hdr, &frag_off);
- if (unlikely(frag_off))
- l4_hdr = NEXTHDR_FRAGMENT;
- break;
- default:
- break;
- }
+ goto no_csum;
+ }
- switch (l4_hdr) {
- case IPPROTO_TCP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_SCTP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- mss_l4len_idx = sizeof(struct sctphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_UDP:
- mss_l4len_idx = sizeof(struct udphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if (((first->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ ((first->protocol == htons(ETH_P_IPV6)) &&
+ ixgbe_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum, version=%d, l4 proto=%x\n",
- network_hdr.ipv4->version, l4_hdr);
- }
- skb_checksum_help(skb);
- goto no_csum;
}
-
- /* update TX checksum flag */
- first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ /* fall through */
+ default:
+ skb_checksum_help(skb);
+ goto csum_failed;
}
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
- type_tucmd, mss_l4len_idx);
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
}
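/* Note: the rewritten ixgbe_tx_csum() no longer parses IP headers at
 * all; it infers the L4 protocol purely from where the stack asked
 * for the checksum to be inserted. The offsets are distinct across
 * the three offloadable protocols:
 *
 *	offsetof(struct tcphdr,  check)    == 16
 *	offsetof(struct udphdr,  check)    ==  6
 *	offsetof(struct sctphdr, checksum) ==  8
 *
 * SCTP alone still needs the ixgbe_ipv6_csum_is_sctp() / ip_hdr()
 * protocol probe, since a checksum at offset 8 could in principle
 * belong to some other protocol's layout; anything unrecognized
 * falls back to skb_checksum_help().
 */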
#define IXGBE_SET_FLAG(_input, _flag, _result) \
@@ -8238,6 +8354,134 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
return 0;
}
+#ifdef CONFIG_NET_CLS_ACT
+static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
+ u8 *queue, u64 *action)
+{
+ unsigned int num_vfs = adapter->num_vfs, vf;
+ struct net_device *upper;
+ struct list_head *iter;
+
+ /* redirect to an SR-IOV VF */
+ for (vf = 0; vf < num_vfs; ++vf) {
+ upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
+ if (upper->ifindex == ifindex) {
+ if (adapter->num_rx_pools > 1)
+ *queue = vf * 2;
+ else
+ *queue = vf * adapter->num_rx_queues_per_pool;
+
+ *action = vf + 1;
+ *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ return 0;
+ }
+ }
+
+ /* redirect to an offloaded macvlan netdev */
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+ if (vadapter && vadapter->netdev->ifindex == ifindex) {
+ *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
+ *action = *queue;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+ struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+ const struct tc_action *a;
+ int err;
+
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ tc_for_each_action(a, exts) {
+
+ /* Drop action */
+ if (is_tcf_gact_shot(a)) {
+ *action = IXGBE_FDIR_DROP_QUEUE;
+ *queue = IXGBE_FDIR_DROP_QUEUE;
+ return 0;
+ }
+
+ /* Redirect to a VF or an offloaded macvlan */
+ if (is_tcf_mirred_redirect(a)) {
+ int ifindex = tcf_mirred_ifindex(a);
+
+ err = handle_redirect_action(adapter, ifindex, queue,
+ action);
+ if (err == 0)
+ return err;
+ }
+ }
+
+ return -EINVAL;
+}
+#else
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+ struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_NET_CLS_ACT */
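/* Note: parse_tc_actions() services cls_u32 filters offloaded via
 * ndo_setup_tc. A hedged example of commands that would exercise
 * the two supported actions (device names are placeholders, not
 * taken from this patch):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip u32 \
 *		match ip dst 192.168.0.1/32 action drop
 *	tc filter add dev eth0 parent ffff: protocol ip u32 \
 *		match ip dst 192.168.0.2/32 \
 *		action mirred egress redirect dev macvlan0
 */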
+
+static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
+ union ixgbe_atr_input *mask,
+ struct tc_cls_u32_offload *cls,
+ struct ixgbe_mat_field *field_ptr,
+ struct ixgbe_nexthdr *nexthdr)
+{
+ int i, j, off;
+ __be32 val, m;
+ bool found_entry = false, found_jump_field = false;
+
+ for (i = 0; i < cls->knode.sel->nkeys; i++) {
+ off = cls->knode.sel->keys[i].off;
+ val = cls->knode.sel->keys[i].val;
+ m = cls->knode.sel->keys[i].mask;
+
+ for (j = 0; field_ptr[j].val; j++) {
+ if (field_ptr[j].off == off) {
+ field_ptr[j].val(input, mask, val, m);
+ input->filter.formatted.flow_type |=
+ field_ptr[j].type;
+ found_entry = true;
+ break;
+ }
+ }
+ if (nexthdr) {
+ if (nexthdr->off == cls->knode.sel->keys[i].off &&
+ nexthdr->val == cls->knode.sel->keys[i].val &&
+ nexthdr->mask == cls->knode.sel->keys[i].mask)
+ found_jump_field = true;
+ else
+ continue;
+ }
+ }
+
+ if (nexthdr && !found_jump_field)
+ return -EINVAL;
+
+ if (!found_entry)
+ return 0;
+
+ mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+ mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ return 0;
+}
+
static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
__be16 protocol,
struct tc_cls_u32_offload *cls)
@@ -8245,16 +8489,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
u32 loc = cls->knode.handle & 0xfffff;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_mat_field *field_ptr;
- struct ixgbe_fdir_filter *input;
- union ixgbe_atr_input mask;
-#ifdef CONFIG_NET_CLS_ACT
- const struct tc_action *a;
-#endif
- int i, err = 0;
+ struct ixgbe_fdir_filter *input = NULL;
+ union ixgbe_atr_input *mask = NULL;
+ struct ixgbe_jump_table *jump = NULL;
+ int i, err = -EINVAL;
u8 queue;
u32 uhtid, link_uhtid;
- memset(&mask, 0, sizeof(union ixgbe_atr_input));
uhtid = TC_U32_USERHTID(cls->knode.handle);
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
@@ -8266,38 +8507,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
* headers when needed.
*/
if (protocol != htons(ETH_P_IP))
- return -EINVAL;
-
- if (link_uhtid) {
- struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
-
- if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
- return -EINVAL;
-
- if (!test_bit(link_uhtid - 1, &adapter->tables))
- return -EINVAL;
-
- for (i = 0; nexthdr[i].jump; i++) {
- if (nexthdr->o != cls->knode.sel->offoff ||
- nexthdr->s != cls->knode.sel->offshift ||
- nexthdr->m != cls->knode.sel->offmask ||
- /* do not support multiple key jumps its just mad */
- cls->knode.sel->nkeys > 1)
- return -EINVAL;
-
- if (nexthdr->off != cls->knode.sel->keys[0].off ||
- nexthdr->val != cls->knode.sel->keys[0].val ||
- nexthdr->mask != cls->knode.sel->keys[0].mask)
- return -EINVAL;
-
- adapter->jump_tables[link_uhtid] = nexthdr->jump;
- }
- return 0;
- }
+ return err;
if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
e_err(drv, "Location out of range\n");
- return -EINVAL;
+ return err;
}
/* cls u32 is a graph starting at root node 0x800. The driver tracks
@@ -8308,87 +8522,123 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
* this function _should_ be generic, so try not to hardcode values here.
*/
if (uhtid == 0x800) {
- field_ptr = adapter->jump_tables[0];
+ field_ptr = (adapter->jump_tables[0])->mat;
} else {
if (uhtid >= IXGBE_MAX_LINK_HANDLE)
- return -EINVAL;
-
- field_ptr = adapter->jump_tables[uhtid];
+ return err;
+ if (!adapter->jump_tables[uhtid])
+ return err;
+ field_ptr = (adapter->jump_tables[uhtid])->mat;
}
if (!field_ptr)
- return -EINVAL;
+ return err;
- input = kzalloc(sizeof(*input), GFP_KERNEL);
- if (!input)
- return -ENOMEM;
+ /* At this point we know field_ptr is valid and need to either
+ * build a cls_u32 link or attach a filter: adding a link to a
+ * handle that does not exist is invalid, as is adding rules to
+ * handles that don't exist.
+ */
- for (i = 0; i < cls->knode.sel->nkeys; i++) {
- int off = cls->knode.sel->keys[i].off;
- __be32 val = cls->knode.sel->keys[i].val;
- __be32 m = cls->knode.sel->keys[i].mask;
- bool found_entry = false;
- int j;
+ if (link_uhtid) {
+ struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
- for (j = 0; field_ptr[j].val; j++) {
- if (field_ptr[j].off == off) {
- field_ptr[j].val(input, &mask, val, m);
- input->filter.formatted.flow_type |=
- field_ptr[j].type;
- found_entry = true;
+ if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return err;
+
+ if (!test_bit(link_uhtid - 1, &adapter->tables))
+ return err;
+
+ for (i = 0; nexthdr[i].jump; i++) {
+ if (nexthdr[i].o != cls->knode.sel->offoff ||
+ nexthdr[i].s != cls->knode.sel->offshift ||
+ nexthdr[i].m != cls->knode.sel->offmask)
+ return err;
+
+ jump = kzalloc(sizeof(*jump), GFP_KERNEL);
+ if (!jump)
+ return -ENOMEM;
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input) {
+ err = -ENOMEM;
+ goto free_jump;
+ }
+ mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+ if (!mask) {
+ err = -ENOMEM;
+ goto free_input;
+ }
+ jump->input = input;
+ jump->mask = mask;
+ err = ixgbe_clsu32_build_input(input, mask, cls,
+ field_ptr, &nexthdr[i]);
+ if (!err) {
+ jump->mat = nexthdr[i].jump;
+ adapter->jump_tables[link_uhtid] = jump;
break;
}
}
-
- if (!found_entry)
- goto err_out;
+ return 0;
}
- mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
- IXGBE_ATR_L4TYPE_MASK;
-
- if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
- mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+ mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+ if (!mask) {
+ err = -ENOMEM;
+ goto free_input;
+ }
-#ifdef CONFIG_NET_CLS_ACT
- if (list_empty(&cls->knode.exts->actions))
+ if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
+ if ((adapter->jump_tables[uhtid])->input)
+ memcpy(input, (adapter->jump_tables[uhtid])->input,
+ sizeof(*input));
+ if ((adapter->jump_tables[uhtid])->mask)
+ memcpy(mask, (adapter->jump_tables[uhtid])->mask,
+ sizeof(*mask));
+ }
+ err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
+ if (err)
goto err_out;
- list_for_each_entry(a, &cls->knode.exts->actions, list) {
- if (!is_tcf_gact_shot(a))
- goto err_out;
- }
-#endif
+ err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
+ &queue);
+ if (err < 0)
+ goto err_out;
- input->action = IXGBE_FDIR_DROP_QUEUE;
- queue = IXGBE_FDIR_DROP_QUEUE;
input->sw_idx = loc;
spin_lock(&adapter->fdir_perfect_lock);
if (hlist_empty(&adapter->fdir_filter_list)) {
- memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
- err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
+ err = ixgbe_fdir_set_input_mask_82599(hw, mask);
if (err)
goto err_out_w_lock;
- } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+ } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
err = -EINVAL;
goto err_out_w_lock;
}
- ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+ ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
input->sw_idx, queue);
if (!err)
ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
spin_unlock(&adapter->fdir_perfect_lock);
+ kfree(mask);
return err;
err_out_w_lock:
spin_unlock(&adapter->fdir_perfect_lock);
err_out:
+ kfree(mask);
+free_input:
kfree(input);
- return -EINVAL;
+free_jump:
+ kfree(jump);
+ return err;
}
static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
@@ -8515,11 +8765,6 @@ static int ixgbe_set_features(struct net_device *netdev,
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
}
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- ixgbe_vlan_strip_enable(adapter);
- else
- ixgbe_vlan_strip_disable(adapter);
-
if (changed & NETIF_F_RXALL)
need_reset = true;
@@ -8536,6 +8781,9 @@ static int ixgbe_set_features(struct net_device *netdev,
if (need_reset)
ixgbe_do_reset(netdev);
+ else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER))
+ ixgbe_set_rx_mode(netdev);
return 0;
}
@@ -8833,17 +9081,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
kfree(fwd_adapter);
}
-#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+#define IXGBE_MAX_MAC_HDR_LEN 127
+#define IXGBE_MAX_NETWORK_HDR_LEN 511
+
static netdev_features_t
ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
- if (!skb->encapsulation)
- return features;
-
- if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
- IXGBE_MAX_TUNNEL_HDR_LEN))
- return features & ~NETIF_F_CSUM_MASK;
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
return features;
}
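/* Note: the 127 and 511 caps above mirror the Tx context descriptor
 * field widths (MACLEN is a 7-bit field, IPLEN a 9-bit field), so
 * headers that cannot be described in those fields must drop back
 * to software checksum/GSO. The MANGLEID clause covers tunneled
 * IPv4 TSO: hardware segmentation increments the inner IP ID per
 * segment, which is only acceptable when the stack has granted
 * NETIF_F_TSO_MANGLEID.
 */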
@@ -8858,6 +9125,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_mac_address = ixgbe_set_mac,
.ndo_change_mtu = ixgbe_change_mtu,
.ndo_tx_timeout = ixgbe_tx_timeout,
+ .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
.ndo_do_ioctl = ixgbe_ioctl,
@@ -8943,7 +9211,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
/**
* ixgbe_wol_supported - Check whether device supports WoL
- * @hw: hw specific details
+ * @adapter: the adapter private structure
* @device_id: the device ID
* @subdev_id: the subsystem device ID
*
@@ -8951,19 +9219,33 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
* which devices have WoL support
*
**/
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
- u16 subdevice_id)
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+ u16 subdevice_id)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
- int is_wol_supported = 0;
+ /* WOL not supported on 82598 */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return false;
+
+ /* check eeprom to see if WOL is enabled for X540 and newer */
+ if (hw->mac.type >= ixgbe_mac_X540) {
+ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+ (hw->bus.func == 0)))
+ return true;
+ }
+
+ /* WOL is determined based on device IDs for 82599 MACs */
switch (device_id) {
case IXGBE_DEV_ID_82599_SFP:
/* Only these subdevices support WOL */
switch (subdevice_id) {
- case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
case IXGBE_SUBDEV_ID_82599_560FLR:
+ case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
+ case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
+ case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
/* only support first port */
if (hw->bus.func != 0)
break;
@@ -8971,66 +9253,31 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
case IXGBE_SUBDEV_ID_82599_ECNA_DP:
- case IXGBE_SUBDEV_ID_82599_LOM_SFP:
- is_wol_supported = 1;
- break;
+ case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
+ case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
+ case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
+ return true;
}
break;
case IXGBE_DEV_ID_82599EN_SFP:
- /* Only this subdevice supports WOL */
+ /* Only these subdevices support WOL */
switch (subdevice_id) {
case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
- is_wol_supported = 1;
- break;
+ return true;
}
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
- is_wol_supported = 1;
+ return true;
break;
case IXGBE_DEV_ID_82599_KX4:
- is_wol_supported = 1;
- break;
- case IXGBE_DEV_ID_X540T:
- case IXGBE_DEV_ID_X540T1:
- case IXGBE_DEV_ID_X550T:
- case IXGBE_DEV_ID_X550EM_X_KX4:
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_X_10G_T:
- /* check eeprom to see if enabled wol */
- if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
- ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
- (hw->bus.func == 0))) {
- is_wol_supported = 1;
- }
+ return true;
+ default:
break;
}
- return is_wol_supported;
-}
-
-/**
- * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
- * @adapter: Pointer to adapter struct
- */
-static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_OF
- struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
- struct ixgbe_hw *hw = &adapter->hw;
- const unsigned char *addr;
-
- addr = of_get_mac_address(dp);
- if (addr) {
- ether_addr_copy(hw->mac.perm_addr, addr);
- return;
- }
-#endif /* CONFIG_OF */
-
-#ifdef CONFIG_SPARC
- ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
-#endif /* CONFIG_SPARC */
+ return false;
}
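/* Note: ixgbe_wol_supported() now returns bool and collapses every
 * X540-and-later device into the single EEPROM capability check at
 * the top (WOL on port 0 only, or on all ports), leaving the
 * device/subdevice ID tables to cover just the 82599 family.
 */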
/**
@@ -9136,23 +9383,23 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* Setup hw api */
- memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
hw->mvals = ii->mvals;
/* EEPROM */
- memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+ hw->eeprom.ops = *ii->eeprom_ops;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (ixgbe_removed(hw->hw_addr)) {
err = -EIO;
goto err_ioremap;
}
/* If EEPROM is valid (bit 8 = 1), use the default, otherwise bit-bang */
- if (!(eec & (1 << 8)))
+ if (!(eec & BIT(8)))
hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
/* PHY */
- memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+ hw->phy.ops = *ii->phy_ops;
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
/* ixgbe_identify_phy_generic will set prtad and mmds properly */
hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
@@ -9169,12 +9416,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
/* Make it possible for the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -9215,54 +9467,62 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto skip_sriov;
/* Mailbox */
ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ hw->mbx.ops = ii->mbx_ops;
pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
ixgbe_enable_sriov(adapter);
skip_sriov:
#endif
netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM;
- netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
+#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
+ netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ IXGBE_GSO_PARTIAL_FEATURES;
+
+ if (hw->mac.type >= ixgbe_mac_82599EB)
netdev->features |= NETIF_F_SCTP_CRC;
- netdev->hw_features |= NETIF_F_SCTP_CRC |
- NETIF_F_NTUPLE |
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_RXALL |
+ NETIF_F_HW_L2FW_DOFFLOAD;
+
+ if (hw->mac.type >= ixgbe_mac_82599EB)
+ netdev->hw_features |= NETIF_F_NTUPLE |
NETIF_F_HW_TC;
- break;
- default:
- break;
- }
- netdev->hw_features |= NETIF_F_RXALL;
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_IP_CSUM;
- netdev->vlan_features |= NETIF_F_IPV6_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->hw_enc_features |= netdev->vlan_features;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
#ifdef CONFIG_IXGBE_DCB
- netdev->dcbnl_ops = &dcbnl_ops;
+ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+ netdev->dcbnl_ops = &dcbnl_ops;
#endif
#ifdef IXGBE_FCOE
@@ -9287,10 +9547,6 @@ skip_sriov:
NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
netdev->hw_features |= NETIF_F_LRO;
@@ -9304,7 +9560,8 @@ skip_sriov:
goto err_sw_init;
}
- ixgbe_get_platform_mac_addr(adapter);
+ eth_platform_get_mac_address(&adapter->pdev->dev,
+ adapter->hw.mac.perm_addr);
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
@@ -9455,6 +9712,7 @@ err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
+ kfree(adapter->jump_tables[0]);
kfree(adapter->mac_table);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
@@ -9483,6 +9741,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev;
bool disable_dev;
+ int i;
/* if !adapter then we already cleaned up in probe */
if (!adapter)
@@ -9532,6 +9791,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
e_dev_info("complete\n");
+ for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
+ if (adapter->jump_tables[i]) {
+ kfree(adapter->jump_tables[i]->input);
+ kfree(adapter->jump_tables[i]->mask);
+ }
+ kfree(adapter->jump_tables[i]);
+ }
+
kfree(adapter->mac_table);
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
@@ -9612,6 +9879,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_X550EM_x:
device_id = IXGBE_DEV_ID_X550EM_X_VF;
break;
+ case ixgbe_mac_x550em_a:
+ device_id = IXGBE_DEV_ID_X550EM_A_VF;
+ break;
default:
device_id = 0;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 9993a471d..a0cb84381 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -48,10 +48,10 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
if (size > mbx->size)
size = mbx->size;
- if (!mbx->ops.read)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.read(hw, msg, size, mbx_id);
+ return mbx->ops->read(hw, msg, size, mbx_id);
}
/**
@@ -70,10 +70,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
if (size > mbx->size)
return IXGBE_ERR_MBX;
- if (!mbx->ops.write)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.write(hw, msg, size, mbx_id);
+ return mbx->ops->write(hw, msg, size, mbx_id);
}
/**
@@ -87,10 +87,10 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_msg)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_msg(hw, mbx_id);
+ return mbx->ops->check_for_msg(hw, mbx_id);
}
/**
@@ -104,10 +104,10 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_ack)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_ack(hw, mbx_id);
+ return mbx->ops->check_for_ack(hw, mbx_id);
}
/**
@@ -121,10 +121,10 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_rst)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_rst(hw, mbx_id);
+ return mbx->ops->check_for_rst(hw, mbx_id);
}
/**
@@ -139,10 +139,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
- if (!countdown || !mbx->ops.check_for_msg)
+ if (!countdown || !mbx->ops)
return IXGBE_ERR_MBX;
- while (mbx->ops.check_for_msg(hw, mbx_id)) {
+ while (mbx->ops->check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
return IXGBE_ERR_MBX;
@@ -164,10 +164,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
- if (!countdown || !mbx->ops.check_for_ack)
+ if (!countdown || !mbx->ops)
return IXGBE_ERR_MBX;
- while (mbx->ops.check_for_ack(hw, mbx_id)) {
+ while (mbx->ops->check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
return IXGBE_ERR_MBX;
@@ -193,7 +193,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val;
- if (!mbx->ops.read)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
ret_val = ixgbe_poll_for_msg(hw, mbx_id);
@@ -201,7 +201,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
return ret_val;
/* if ack received read message */
- return mbx->ops.read(hw, msg, size, mbx_id);
+ return mbx->ops->read(hw, msg, size, mbx_id);
}
/**
@@ -221,11 +221,11 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
s32 ret_val;
/* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
+ if (!mbx->ops || !mbx->timeout)
return IXGBE_ERR_MBX;
/* send msg */
- ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+ ret_val = mbx->ops->write(hw, msg, size, mbx_id);
if (ret_val)
return ret_val;
@@ -307,14 +307,15 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
break;
}
- if (vflre & (1 << vf_shift)) {
- IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ if (vflre & BIT(vf_shift)) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
hw->mbx.stats.rsts++;
return 0;
}
@@ -430,6 +431,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_x550em_a &&
hw->mac.type != ixgbe_mac_X540)
return;
@@ -446,7 +448,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
}
#endif /* CONFIG_PCI_IOV */
-struct ixgbe_mbx_operations mbx_ops_generic = {
+const struct ixgbe_mbx_operations mbx_ops_generic = {
.read = ixgbe_read_mbx_pf,
.write = ixgbe_write_mbx_pf,
.read_posted = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 8daa95f74..01c2667c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -123,6 +123,6 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
-extern struct ixgbe_mbx_operations mbx_ops_generic;
+extern const struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index 74c53ad9d..a8bed3d88 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -38,6 +38,12 @@ struct ixgbe_mat_field {
unsigned int type;
};
+struct ixgbe_jump_table {
+ struct ixgbe_mat_field *mat;
+ struct ixgbe_fdir_filter *input;
+ union ixgbe_atr_input *mask;
+};
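/* Note: a jump table entry now carries the partially built filter
 * (input) and mask next to its match table, so rules later attached
 * to the link handle inherit what the jump rule already matched on
 * instead of starting from an empty filter.
 */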
+
static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
@@ -82,6 +88,12 @@ static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
{ .val = NULL } /* terminal node */
};
+static struct ixgbe_mat_field ixgbe_udp_fields[] = {
+ {.off = 0, .val = ixgbe_mat_prgm_ports,
+ .type = IXGBE_ATR_FLOW_TYPE_UDPV4},
+ { .val = NULL } /* terminal node */
+};
+
struct ixgbe_nexthdr {
/* offset, shift, and mask of position to next header */
unsigned int o;
@@ -98,6 +110,8 @@ struct ixgbe_nexthdr {
static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = {
{ .o = 0, .s = 6, .m = 0xf,
.off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields},
+ { .o = 0, .s = 6, .m = 0xf,
+ .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields},
{ .jump = NULL } /* terminal node */
};
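/* Note on the new UDP jump: both entries key on the 32-bit word at
 * offset 8 of the IPv4 header, whose second byte is the protocol
 * field. In the byte order these tables use for u32 key values,
 * 0x600/0xff00 selects protocol 0x06 (TCP) and 0x1100/0xff00
 * selects protocol 0x11 (UDP, decimal 17).
 */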
#endif /* _IXGBE_MODEL_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 5abd66c84..cc735ec3e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -81,7 +81,11 @@
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
#define IXGBE_CS4227 0xBE /* CS4227 address */
+#define IXGBE_CS4227_GLOBAL_ID_LSB 0
+#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
+#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
+#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
@@ -103,7 +107,7 @@
#define IXGBE_PE 0xE0 /* Port expander addr */
#define IXGBE_PE_OUTPUT 1 /* Output reg offset */
#define IXGBE_PE_CONFIG 3 /* Config reg offset */
-#define IXGBE_PE_BIT1 (1 << 1)
+#define IXGBE_PE_BIT1 BIT(1)
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ef1504d41..e5431bfe3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -333,6 +333,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
*/
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -395,7 +396,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
if (incval > 0x00FFFFFFULL)
e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
- (1 << IXGBE_INCPER_SHIFT_82599) |
+ BIT(IXGBE_INCPER_SHIFT_82599) |
((u32)incval & 0x00FFFFFFUL));
break;
default:
@@ -921,6 +922,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
switch (hw->mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* enable timestamping all packets only if at least some
* packets were requested. Otherwise, play nice and disable
* timestamping
@@ -1083,6 +1085,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
cc.shift = 2;
}
/* fallthrough */
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
@@ -1111,7 +1114,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
incval >>= IXGBE_INCVAL_SHIFT_82599;
cc.shift -= IXGBE_INCVAL_SHIFT_82599;
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
- (1 << IXGBE_INCPER_SHIFT_82599) | incval);
+ BIT(IXGBE_INCPER_SHIFT_82599) | incval);
break;
default:
/* other devices aren't supported */
@@ -1223,6 +1226,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 30000000;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8025a3f93..c5caacdd1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
+ mta_reg |= BIT(vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
vmolr |= IXGBE_VMOLR_ROMPE;
@@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
+ mta_reg |= BIT(vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
@@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* enable or disable receive depending on error */
vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
if (err)
- vfre &= ~(1 << vf_shift);
+ vfre &= ~BIT(vf_shift);
else
- vfre |= 1 << vf_shift;
+ vfre |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
if (err) {
@@ -589,47 +589,47 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 i;
+ u32 vlvfb_mask, pool_mask, i;
+
+ /* create mask for VF and other pools */
+ pool_mask = ~BIT(VMDQ_P(0) % 32);
+ vlvfb_mask = BIT(vf % 32);
/* post-decrement loop, covers VLVF_ENTRIES - 1 to 0 */
for (i = IXGBE_VLVF_ENTRIES; i--;) {
u32 bits[2], vlvfb, vid, vfta, vlvf;
u32 word = i * 2 + vf / 32;
- u32 mask = 1 << (vf % 32);
+ u32 mask;
vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
/* if our bit isn't set we can skip it */
- if (!(vlvfb & mask))
+ if (!(vlvfb & vlvfb_mask))
continue;
/* clear our bit from vlvfb */
- vlvfb ^= mask;
+ vlvfb ^= vlvfb_mask;
/* create 64b mask to check whether we should clear VLVF */
bits[word % 2] = vlvfb;
bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
- /* if promisc is enabled, PF will be present, leave VFTA */
- if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) {
- bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32));
-
- if (bits[0] || bits[1])
- goto update_vlvfb;
- goto update_vlvf;
- }
-
/* if other pools are present, just remove ourselves */
- if (bits[0] || bits[1])
+ if (bits[(VMDQ_P(0) / 32) ^ 1] ||
+ (bits[VMDQ_P(0) / 32] & pool_mask))
goto update_vlvfb;
+ /* if PF is present, leave VFTA */
+ if (bits[0] || bits[1])
+ goto update_vlvf;
+
/* if we cannot determine VLAN just remove ourselves */
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
if (!vlvf)
goto update_vlvfb;
vid = vlvf & VLAN_VID_MASK;
- mask = 1 << (vid % 32);
+ mask = BIT(vid % 32);
/* clear bit from VFTA */
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
@@ -638,6 +638,9 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
update_vlvf:
/* clear POOL selection enable */
IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+ vlvfb = 0;
update_vlvfb:
/* clear pool bits */
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
@@ -810,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= 1 << vf_shift;
+ reg |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
/* force drop enable for all VF Rx queues */
@@ -818,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= 1 << vf_shift;
+ reg |= BIT(vf_shift);
/*
* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
* For more info take a look at ixgbe_set_vf_lpe
@@ -834,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
#endif /* CONFIG_FCOE */
if (pf_max_frame > ETH_FRAME_LEN)
- reg &= ~(1 << vf_shift);
+ reg &= ~BIT(vf_shift);
}
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
@@ -843,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
- reg |= (1 << vf_shift);
+ reg |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
/*
@@ -887,7 +890,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
return -1;
}
- if (adapter->vfinfo[vf].pf_set_mac &&
+ if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
!ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
e_warn(drv,
"VF %d attempted to override administratively set MAC address\n"
@@ -905,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
u8 tcs = netdev_get_num_tc(adapter->netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- int err;
if (adapter->vfinfo[vf].pf_vlan || tcs) {
e_warn(drv,
@@ -920,19 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
if (!vid && !add)
return 0;
- err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
- if (err)
- return err;
-
- if (adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-
- if (add)
- adapter->vfinfo[vf].vlan_count++;
- else if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
-
- return 0;
+ return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}
static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
@@ -961,8 +950,11 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
* If the VF is allowed to set MAC filters then turn off
* anti-spoofing to avoid false positives.
*/
- if (adapter->vfinfo[vf].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+ if (adapter->vfinfo[vf].spoofchk_enabled) {
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
+ }
}
err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
@@ -1318,9 +1310,6 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
ixgbe_set_vmvir(adapter, vlan, qos, vf);
ixgbe_set_vmolr(hw, vf, false);
- if (adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- adapter->vfinfo[vf].vlan_count++;
/* enable hide vlan on X550 */
if (hw->mac.type >= ixgbe_mac_X550)
@@ -1353,9 +1342,6 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
ixgbe_set_vf_vlan(adapter, true, 0, vf);
ixgbe_clear_vmvir(adapter, vf);
ixgbe_set_vmolr(hw, vf, true);
- hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
- if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
/* disable hide VLAN on X550 */
if (hw->mac.type >= ixgbe_mac_X550)
@@ -1395,7 +1381,7 @@ out:
return err;
}
-static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
+int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
{
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
@@ -1522,27 +1508,34 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int vf_target_reg = vf >> 3;
- int vf_target_shift = vf % 8;
struct ixgbe_hw *hw = &adapter->hw;
- u32 regval;
if (vf >= adapter->num_vfs)
return -EINVAL;
adapter->vfinfo[vf].spoofchk_enabled = setting;
- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- regval &= ~(1 << vf_target_shift);
- regval |= (setting << vf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
-
- if (adapter->vfinfo[vf].vlan_count) {
- vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- regval &= ~(1 << vf_target_shift);
- regval |= (setting << vf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+ /* configure MAC spoofing */
+ hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
+
+ /* configure VLAN spoofing */
+ hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
+
+ /* Ensure the LLDP and FC EtherType filters are set up for EtherType
+ * anti-spoofing before calling set_ethertype_anti_spoofing for this VF
+ */
+ if (hw->mac.ops.set_ethertype_anti_spoofing) {
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETH_P_LLDP));
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ ETH_P_PAUSE));
+
+ hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index dad925706..47e65e2f8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -44,6 +44,7 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
+int ixgbe_link_mbps(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate);
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index bf7367a08..da3d8358f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -59,8 +59,12 @@
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
-#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -75,21 +79,25 @@
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X550T 0x1563
+#define IXGBE_DEV_ID_X550T1 0x15D1
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
-#define IXGBE_DEV_ID_X550_VF_HV 0x1564
-#define IXGBE_DEV_ID_X550_VF 0x1565
-#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
-#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
+#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
+#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
+#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
+#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
/* VF Device IDs */
-#define IXGBE_DEV_ID_82599_VF 0x10ED
-#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
#define IXGBE_CAT(r, m) IXGBE_##r##_##m
@@ -128,7 +136,7 @@
#define IXGBE_FLA_X540 IXGBE_FLA_8259X
#define IXGBE_FLA_X550 IXGBE_FLA_8259X
#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X
-#define IXGBE_FLA_X550EM_a 0x15F6C
+#define IXGBE_FLA_X550EM_a 0x15F68
#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA)
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
@@ -143,13 +151,6 @@
#define IXGBE_GRC_X550EM_a 0x15F64
#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC)
-#define IXGBE_SRAMREL_8259X 0x10210
-#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550EM_a 0x15F6C
-#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL)
-
/* General Receive Control */
#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
@@ -375,6 +376,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
+#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
#define IXGBE_RXFECCERR0 0x051B8
#define IXGBE_LLITHRESH 0x0EC90
@@ -446,6 +449,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */
+#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
@@ -543,6 +548,7 @@ struct ixgbe_thermal_sensor_data {
/* DCB registers */
#define MAX_TRAFFIC_CLASS 8
#define X540_TRAFFIC_CLASS 4
+#define DEF_TRAFFIC_CLASS 1
#define IXGBE_RMCS 0x03D00
#define IXGBE_DPMCS 0x07F40
#define IXGBE_PDPMCS 0x0CD00
@@ -554,7 +560,6 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-
/* Security Control Registers */
#define IXGBE_SECTXCTRL 0x08800
#define IXGBE_SECTXSTAT 0x08804
@@ -693,16 +698,16 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */
#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
#define IXGBE_FCBUFF_OFFSET_SHIFT 16
-#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
-#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */
+#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */
#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
@@ -719,23 +724,23 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */
#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
-#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+#define IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */
/* FCoE Receive Control */
#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */
#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
/* FCoE Redirection */
@@ -1056,15 +1061,9 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
#define IXGBE_TDPROBE 0x07F20
#define IXGBE_TXBUFCTRL 0x0C600
-#define IXGBE_TXBUFDATA0 0x0C610
-#define IXGBE_TXBUFDATA1 0x0C614
-#define IXGBE_TXBUFDATA2 0x0C618
-#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_RXBUFCTRL 0x03600
-#define IXGBE_RXBUFDATA0 0x03610
-#define IXGBE_RXBUFDATA1 0x03614
-#define IXGBE_RXBUFDATA2 0x03618
-#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
#define IXGBE_RFVAL 0x050A4
#define IXGBE_MDFTC1 0x042B8
@@ -1127,6 +1126,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_XPCSS 0x04290
#define IXGBE_MFLCN 0x04294
#define IXGBE_SERDESC 0x04298
+#define IXGBE_MAC_SGMII_BUSY 0x04298
#define IXGBE_MACS 0x0429C
#define IXGBE_AUTOC 0x042A0
#define IXGBE_LINKS 0x042A4
@@ -1203,6 +1203,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_MBINTEN 0x10000000
+#define IXGBE_RDRXCTL_MDP_EN 0x20000000
/* RQTC Bit Masks and Shifts */
#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
@@ -1249,20 +1251,20 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
/* MSCA Bit Masks */
@@ -1309,6 +1311,7 @@ struct ixgbe_thermal_sensor_data {
/* MDIO definitions */
+#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
@@ -1740,7 +1743,7 @@ enum {
#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */
#define IXGBE_ETQF_POOL_SHIFT 20
#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
@@ -1866,20 +1869,20 @@ enum {
#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
-#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
@@ -1957,7 +1960,9 @@ enum {
#define IXGBE_GSSR_PHY1_SM 0x0004
#define IXGBE_GSSR_MAC_CSR_SM 0x0008
#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200
#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */
#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
#define IXGBE_GSSR_I2C_MASK 0x1800
#define IXGBE_GSSR_NVM_PHY_MASK 0xF
@@ -1997,6 +2002,9 @@ enum {
#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_EEPROM_CTRL_4 0x45
+#define IXGBE_EE_CTRL_4_INST_ID 0x10
+#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
#define IXGBE_PHY_PTR 0x04
@@ -2111,6 +2119,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7)
#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
#define IXGBE_FW_LESM_STATE_1 0x1
#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
@@ -2530,6 +2539,10 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000
+#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21
+#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */
+#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */
#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
@@ -2620,6 +2633,20 @@ enum ixgbe_fdir_pballoc_type {
#define FW_MAX_READ_BUFFER_SIZE 1024
#define FW_DISABLE_RXEN_CMD 0xDE
#define FW_DISABLE_RXEN_LEN 0x1
+#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_PHY_TOKEN_REQ_CMD 0x0A
+#define FW_PHY_TOKEN_REQ_LEN 2
+#define FW_PHY_TOKEN_REQ 0
+#define FW_PHY_TOKEN_REL 1
+#define FW_PHY_TOKEN_OK 1
+#define FW_PHY_TOKEN_RETRY 0x80
+#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */
+#define FW_PHY_TOKEN_WAIT 5 /* seconds */
+#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY)
+#define FW_INT_PHY_REQ_CMD 0xB
+#define FW_INT_PHY_REQ_LEN 10
+#define FW_INT_PHY_REQ_READ 0
+#define FW_INT_PHY_REQ_WRITE 1
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2688,6 +2715,28 @@ struct ixgbe_hic_disable_rxen {
u16 pad3;
};
+struct ixgbe_hic_phy_token_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ u16 pad;
+};
+
+struct ixgbe_hic_internal_phy_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ __be16 address;
+ u16 rsv1;
+ __be32 write_data;
+ u16 pad;
+} __packed;
+
+struct ixgbe_hic_internal_phy_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 read_data;
+};
+
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@@ -2786,15 +2835,15 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */
-#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
@@ -2948,7 +2997,6 @@ union ixgbe_atr_hash_dword {
IXGBE_CAT(EEC, m), \
IXGBE_CAT(FLA, m), \
IXGBE_CAT(GRC, m), \
- IXGBE_CAT(SRAMREL, m), \
IXGBE_CAT(FACTPS, m), \
IXGBE_CAT(SWSM, m), \
IXGBE_CAT(SWFW_SYNC, m), \
@@ -2989,6 +3037,7 @@ enum ixgbe_mac_type {
ixgbe_mac_X540,
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
+ ixgbe_mac_x550em_a,
ixgbe_num_macs
};
@@ -3017,6 +3066,7 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_intel,
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported,
+ ixgbe_phy_sgmii,
ixgbe_phy_generic
};
@@ -3130,8 +3180,9 @@ struct ixgbe_bus_info {
enum ixgbe_bus_width width;
enum ixgbe_bus_type type;
- u16 func;
- u16 lan_id;
+ u8 func;
+ u8 lan_id;
+ u8 instance_id;
};
/* Flow control parameters */
@@ -3266,6 +3317,7 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*init_swfw_sync)(struct ixgbe_hw *);
s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
@@ -3308,6 +3360,7 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *);
+ s32 (*setup_fc)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
@@ -3323,6 +3376,8 @@ struct ixgbe_mac_operations {
s32 (*dmac_config)(struct ixgbe_hw *hw);
s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
};
struct ixgbe_phy_operations {
@@ -3442,7 +3497,7 @@ struct ixgbe_mbx_stats {
};
struct ixgbe_mbx_info {
- struct ixgbe_mbx_operations ops;
+ const struct ixgbe_mbx_operations *ops;
struct ixgbe_mbx_stats stats;
u32 timeout;
u32 usec_delay;
@@ -3475,10 +3530,10 @@ struct ixgbe_hw {
struct ixgbe_info {
enum ixgbe_mac_type mac;
s32 (*get_invariants)(struct ixgbe_hw *);
- struct ixgbe_mac_operations *mac_ops;
- struct ixgbe_eeprom_operations *eeprom_ops;
- struct ixgbe_phy_operations *phy_ops;
- struct ixgbe_mbx_operations *mbx_ops;
+ const struct ixgbe_mac_operations *mac_ops;
+ const struct ixgbe_eeprom_operations *eeprom_ops;
+ const struct ixgbe_phy_operations *phy_ops;
+ const struct ixgbe_mbx_operations *mbx_ops;
const u32 *mvals;
};
@@ -3517,14 +3572,19 @@ struct ixgbe_info {
#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
+#define IXGBE_ERR_FW_RESP_INVALID -39
+#define IXGBE_ERR_TOKEN_RETRY -40
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ BIT(5)
-#define IXGBE_FUSES0_REV_MASK (3 << 6)
+#define IXGBE_FUSES0_REV_MASK (3u << 6)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
@@ -3532,43 +3592,54 @@ struct ixgbe_info {
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31)
+
+#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28)
+#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29)
+
+#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0)
+#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19)
-#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2)
-#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31)
#define IXGBE_KX4_LINK_CNTL_1 0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31)
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
@@ -3584,12 +3655,17 @@ struct ixgbe_info {
#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
-#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1)
+#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
+ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 2358c1b7d..f2b1d48a1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -214,8 +214,8 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -747,6 +747,25 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_init_swfw_sync_X540 - Reset hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function resets hardware semaphore bits for a semaphore that may
+ * have been left locked due to a catastrophic failure.
+ **/
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
+{
+ /* Try to grab the semaphore; there is no need to check whether we
+ * actually got the lock, since we do the same thing either way:
+ * if we got the lock, we release it; if we timed out waiting for
+ * it, we force its release.
+ */
+ ixgbe_get_swfw_sync_semaphore(hw);
+ ixgbe_release_swfw_sync_semaphore(hw);
+}
+
+/**
* ixgbe_blink_led_start_X540 - Blink LED based on index.
* @hw: pointer to hardware structure
* @index: led number to blink
@@ -810,7 +829,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
return 0;
}
-static struct ixgbe_mac_operations mac_ops_X540 = {
+static const struct ixgbe_mac_operations mac_ops_X540 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_X540,
.start_hw = &ixgbe_start_hw_X540,
@@ -846,6 +865,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = NULL,
@@ -853,6 +873,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
.disable_rx_buff = &ixgbe_disable_rx_buff_generic,
.enable_rx_buff = &ixgbe_enable_rx_buff_generic,
.get_thermal_sensor_data = NULL,
@@ -863,7 +884,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.init_params = &ixgbe_init_eeprom_params_X540,
.read = &ixgbe_read_eerd_X540,
.read_buffer = &ixgbe_read_eerd_buffer_X540,
@@ -874,7 +895,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
};
-static struct ixgbe_phy_operations phy_ops_X540 = {
+static const struct ixgbe_phy_operations phy_ops_X540 = {
.identify = &ixgbe_identify_phy_generic,
.identify_sfp = &ixgbe_identify_sfp_module_generic,
.init = NULL,
@@ -897,7 +918,7 @@ static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X540)
};
-struct ixgbe_info ixgbe_X540_info = {
+const struct ixgbe_info ixgbe_X540_info = {
.mac = ixgbe_mac_X540,
.get_invariants = &ixgbe_get_invariants_X540,
.mac_ops = &mac_ops_X540,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index a1468b1f4..e21cd4849 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -36,4 +36,5 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 68a9c6464..19b75cd98 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
+ * Copyright(c) 1999 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,7 @@
#include "ixgbe_phy.h"
static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
+static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
@@ -272,16 +273,26 @@ out:
static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_SFP:
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
ixgbe_check_cs4227(hw);
+ /* Fallthrough */
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
@@ -324,8 +335,8 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -412,6 +423,121 @@ out:
return ret;
}
+/**
+ * ixgbe_get_phy_token - Get the token for shared PHY access
+ * @hw: Pointer to hardware structure
+ */
+static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ s32 status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REQ;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (status)
+ return status;
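+
+ /* The FW reports OK when the token was granted and RETRY when the
+ * token is not currently available; any other response is invalid.
+ */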
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return 0;
+ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
+ return IXGBE_ERR_FW_RESP_INVALID;
+
+ return IXGBE_ERR_TOKEN_RETRY;
+}
+
+/**
+ * ixgbe_put_phy_token - Put the token for shared PHY access
+ * @hw: Pointer to hardware structure
+ */
+static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ s32 status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REL;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (status)
+ return status;
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return 0;
+ return IXGBE_ERR_FW_RESP_INVALID;
+}
+
+/**
+ * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ __always_unused u32 device_type,
+ u32 data)
+{
+ struct ixgbe_hic_internal_phy_req write_cmd;
+
+ memset(&write_cmd, 0, sizeof(write_cmd));
+ write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ write_cmd.port_number = hw->bus.lan_id;
+ write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
+ write_cmd.address = cpu_to_be16(reg_addr);
+ write_cmd.write_data = cpu_to_be32(data);
+
+ return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: Pointer to location where the read data is returned
+ **/
+static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ __always_unused u32 device_type,
+ u32 *data)
+{
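+ /* The FW command and its response overlay the same buffer, so use a
+ * union of the request and response layouts.
+ */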
+ union {
+ struct ixgbe_hic_internal_phy_req cmd;
+ struct ixgbe_hic_internal_phy_resp rsp;
+ } hic;
+ s32 status;
+
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.command_type = FW_INT_PHY_REQ_READ;
+ hic.cmd.address = cpu_to_be16(reg_addr);
+
+ status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, true);
+
+ /* Extract the register value from the response. */
+ *data = be32_to_cpu(hic.rsp.read_data);
+
+ return status;
+}
+
/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
* command assuming that the semaphore is already obtained.
* @hw: pointer to hardware structure
@@ -436,8 +562,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
/* one word */
buffer.length = cpu_to_be16(sizeof(u16));
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
if (status)
return status;
@@ -487,7 +612,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
buffer.address = cpu_to_be32((offset + current_word) * 2);
buffer.length = cpu_to_be16(words_to_read * 2);
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ status = ixgbe_host_interface_command(hw, &buffer,
sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT,
false);
@@ -770,8 +895,7 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
buffer.data = data;
buffer.address = cpu_to_be32(offset * 2);
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
return status;
}
@@ -813,8 +937,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
buffer.req.checksum = FW_DEFAULT_CHECKSUM;
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
return status;
}
@@ -861,9 +984,9 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
- fw_cmd.port_number = (u8)hw->bus.lan_id;
+ fw_cmd.port_number = hw->bus.lan_id;
- status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ status = ixgbe_host_interface_command(hw, &fw_cmd,
sizeof(struct ixgbe_hic_disable_rxen),
IXGBE_HI_COMMAND_TIMEOUT, true);
@@ -1248,6 +1371,117 @@ i2c_err:
}
/**
+ * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
+ * @hw: pointer to hardware structure
+ *
+ * Configure the integrated PHY for native SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ bool setup_linear = false;
+ u32 reg_phy_int;
+ s32 rc;
+
+ /* Check if SFP module is supported and linear */
+ rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success: an SFP-not-present
+ * error is not treated as fatal in the setup MAC link flow.
+ */
+ if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+ return 0;
+
+ if (rc)
+ return rc;
+
+ /* Configure internal PHY for native SFI */
+ rc = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ &reg_phy_int);
+ if (rc)
+ return rc;
+
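+ /* Linear (passive) modules need the internal PHY in LINEAR mode;
+ * all other supported modules use the LIMITING mode.
+ */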
+ if (setup_linear) {
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR;
+ } else {
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR;
+ }
+
+ rc = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ reg_phy_int);
+ if (rc)
+ return rc;
+
+ /* Setup XFI/SFI internal link */
+ return ixgbe_setup_ixfi_x550em(hw, &speed);
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
+ * @hw: pointer to hardware structure
+ *
+ * Configure the integrated PHY for SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ u32 reg_slice, slice_offset;
+ bool setup_linear = false;
+ u16 reg_phy_ext;
+ s32 rc;
+
+ /* Check if SFP module is supported and linear */
+ rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success: an SFP-not-present
+ * error is not treated as fatal in the setup MAC link flow.
+ */
+ if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+ return 0;
+
+ if (rc)
+ return rc;
+
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+
+ /* Get external PHY device id */
+ rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ if (rc)
+ return rc;
+
+ /* When configuring quad port CS4223, the MAC instance is part
+ * of the slice offset.
+ */
+ if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ slice_offset = (hw->bus.lan_id +
+ (hw->bus.instance_id << 1)) << 12;
+ else
+ slice_offset = hw->bus.lan_id << 12;
+
+ /* Configure CS4227/CS4223 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+ if (setup_linear)
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
+ else
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
+ return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE,
+ reg_phy_ext);
+}
+
+/**
* ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -1326,6 +1560,57 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
return 0;
}
+/**
+ * ixgbe_setup_sgmii - Set up link for SGMII
+ * @hw: pointer to hardware structure
+ */
+static s32
+ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval;
+ s32 rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
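+ /* Replace backplane auto-negotiation with clause 37 SGMII AN at a
+ * forced 1G link speed, then restart auto-negotiation.
+ */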
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+
+ return rc;
+}
+
/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
**/
@@ -1342,15 +1627,35 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
- mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
+ mac->ops.setup_fc = ixgbe_setup_fc_x550em;
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550a;
+ break;
+ default:
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550em;
+ break;
+ }
mac->ops.set_rate_select_speed =
ixgbe_set_soft_rate_select_speed;
break;
case ixgbe_media_type_copper:
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.setup_fc = ixgbe_setup_fc_generic;
mac->ops.check_link = ixgbe_check_link_t_X550em;
+ return;
+ case ixgbe_media_type_backplane:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+ mac->ops.setup_link = ixgbe_setup_sgmii;
break;
default:
+ mac->ops.setup_fc = ixgbe_setup_fc_x550em;
break;
}
}
@@ -1614,7 +1919,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
s32 status;
u32 reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
@@ -1636,7 +1941,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
/* Restart auto-negotiation. */
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
@@ -1653,9 +1958,9 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
s32 status;
u32 reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, &reg_val);
if (status)
return status;
@@ -1674,20 +1979,24 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
/* Restart auto-negotiation. */
reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, reg_val);
return status;
}
-/** ixgbe_setup_kr_x550em - Configure the KR PHY.
- * @hw: pointer to hardware structure
+/**
+ * ixgbe_setup_kr_x550em - Configure the KR PHY
+ * @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY.
+ * Configures the integrated KR PHY for X550EM_x.
**/
static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
+ if (hw->mac.type != ixgbe_mac_X550EM_x)
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -1842,6 +2151,86 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
return status;
}
+/**
+ * ixgbe_setup_fc_x550em - Set up flow control
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+{
+ bool pause, asm_dir;
+ u32 reg_val;
+ s32 rc;
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ /* 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Determine PAUSE and ASM_DIR bits. */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ pause = false;
+ asm_dir = false;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause = false;
+ asm_dir = true;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of RX
+ * Pause ONLY, we will advertise that we support both
+ * symmetric and asymmetric Rx PAUSE, as such we fall
+ * through to the fc_full statement. Later, we will
+ * disable the adapter's ability to send PAUSE frames.
+ */
+ /* Fallthrough */
+ case ixgbe_fc_full:
+ pause = true;
+ asm_dir = true;
+ break;
+ default:
+ hw_err(hw, "Flow control param set incorrectly\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
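+ /* Only the KR backplane devices advertise PAUSE/ASM_DIR through the
+ * KRM AN control register; nothing more to do for other device IDs.
+ */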
+ if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR &&
+ hw->device_id != IXGBE_DEV_ID_X550EM_A_KR &&
+ hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L)
+ return 0;
+
+ rc = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ &reg_val);
+ if (rc)
+ return rc;
+
+ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ if (pause)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ if (asm_dir)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ rc = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ reg_val);
+
+ /* This device does not fully support AN. */
+ hw->fc.disable_fc_autoneg = true;
+
+ return rc;
+}
+
/** ixgbe_enter_lplu_x550em - Transition to low power states
* @hw: pointer to hardware structure
*
@@ -1939,6 +2328,36 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
return status;
}
+/**
+ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
+ * @hw: pointer to hardware structure
+ *
+ * Read NW_MNG_IF_SEL register and save field values.
+ */
+static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+{
+ /* Save NW management interface connected on board. This is used
+ * to determine internal PHY mode.
+ */
+ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If X552 (X550EM_a) and MDIO is connected to an external PHY, then
+ * set the PHY address. This register field has only been used for
+ * X552.
+ */
+ if (!hw->phy.nw_mng_if_sel) {
+ if (hw->mac.type == ixgbe_mac_x550em_a) {
+ struct ixgbe_adapter *adapter = hw->back;
+
+ e_warn(drv, "nw_mng_if_sel not set\n");
+ }
+ return;
+ }
+
+ hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+}
+
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1953,14 +2372,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
+
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
-
- /* Save NW management interface connected on board. This is used
- * to determine internal PHY mode.
- */
- phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
}
/* Identify the PHY or SFP module */
@@ -2023,16 +2439,24 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/* Detect if there is a copper PHY attached. */
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ hw->phy.type = ixgbe_phy_sgmii;
+ /* Fallthrough */
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
- media_type = ixgbe_media_type_copper;
+ media_type = ixgbe_media_type_copper;
break;
default:
media_type = ixgbe_media_type_unknown;
@@ -2080,6 +2504,27 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
return status;
}
+/**
+ * ixgbe_set_mdio_speed - Set MDIO clock speed
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
+{
+ u32 hlreg0;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ /* Config MDIO clock speed before the first MDIO PHY access */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
+ default:
+ break;
+ }
+}
+
/** ixgbe_reset_hw_X550em - Perform hardware reset
** @hw: pointer to hardware structure
**
@@ -2093,7 +2538,6 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
s32 status;
u32 ctrl = 0;
u32 i;
- u32 hlreg0;
bool link_up = false;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -2179,11 +2623,7 @@ mac_reset_top:
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
- hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- }
+ ixgbe_set_mdio_speed(hw);
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
ixgbe_setup_mux_ctl(hw);
@@ -2206,9 +2646,9 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof |= (1 << vf_target_shift);
+ pfvfspoof |= BIT(vf_target_shift);
else
- pfvfspoof &= ~(1 << vf_target_shift);
+ pfvfspoof &= ~BIT(vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
@@ -2296,6 +2736,110 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
ixgbe_release_swfw_sync_X540(hw, mask);
}
+/**
+ * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and gets the shared PHY token as needed
+ */
+static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+ int retries = FW_PHY_TOKEN_RETRIES;
+ s32 status;
+
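+ /* The hardware SWFW semaphore and the FW PHY token must be held
+ * together, so on a token retry the semaphore is dropped before
+ * sleeping and the whole sequence is restarted.
+ */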
+ while (--retries) {
+ status = 0;
+ if (hmask)
+ status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
+ if (status)
+ return status;
+ if (!(mask & IXGBE_GSSR_TOKEN_SM))
+ return 0;
+
+ status = ixgbe_get_phy_token(hw);
+ if (!status)
+ return 0;
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+ if (status != IXGBE_ERR_TOKEN_RETRY)
+ return status;
+ msleep(FW_PHY_TOKEN_DELAY);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts back the shared PHY token as needed
+ */
+static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+
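+ /* Put the PHY token back before releasing the hardware semaphore
+ * bits.
+ */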
+ if (mask & IXGBE_GSSR_TOKEN_SM)
+ ixgbe_put_phy_token(hw);
+
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+}
+
+/**
+ * ixgbe_read_phy_reg_x550a - Reads specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ */
+static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_x550a - Writes specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to specified PHY register using the SWFW lock and PHY Token.
+ * The PHY Token is needed since the MDIO is shared between two MAC instances.
+ */
+static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -2337,12 +2881,10 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
.init_thermal_sensor_thresh = NULL, \
- .prot_autoc_read = &prot_autoc_read_generic, \
- .prot_autoc_write = &prot_autoc_write_generic, \
.enable_rx = &ixgbe_enable_rx_generic, \
.disable_rx = &ixgbe_disable_rx_x550, \
-static struct ixgbe_mac_operations mac_ops_X550 = {
+static const struct ixgbe_mac_operations mac_ops_X550 = {
X550_COMMON_MAC
.reset_hw = &ixgbe_reset_hw_X540,
.get_media_type = &ixgbe_get_media_type_X540,
@@ -2354,20 +2896,45 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
.setup_sfp = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
};
-static struct ixgbe_mac_operations mac_ops_X550EM_x = {
+static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
X550_COMMON_MAC
.reset_hw = &ixgbe_reset_hw_X550em,
.get_media_type = &ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
.get_wwn_prefix = NULL,
- .setup_link = NULL, /* defined later */
+ .setup_link = &ixgbe_setup_mac_link_X540,
.get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
.get_bus_info = &ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
.release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .setup_fc = NULL, /* defined later */
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
+};
+
+static struct ixgbe_mac_operations mac_ops_x550em_a = {
+ X550_COMMON_MAC
+ .reset_hw = ixgbe_reset_hw_X550em,
+ .get_media_type = ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = NULL, /* defined later */
+ .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = ixgbe_get_bus_info_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
+ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
+ .setup_fc = ixgbe_setup_fc_x550em,
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
};
#define X550_COMMON_EEP \
@@ -2379,12 +2946,12 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = {
.update_checksum = &ixgbe_update_eeprom_checksum_X550, \
.calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
-static struct ixgbe_eeprom_operations eeprom_ops_X550 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
X550_COMMON_EEP
.init_params = &ixgbe_init_eeprom_params_X550,
};
-static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
X550_COMMON_EEP
.init_params = &ixgbe_init_eeprom_params_X540,
};
@@ -2398,23 +2965,25 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
- .read_reg = &ixgbe_read_phy_reg_generic, \
- .write_reg = &ixgbe_write_phy_reg_generic, \
.setup_link = &ixgbe_setup_phy_link_generic, \
.set_phy_power = NULL, \
.check_overtemp = &ixgbe_tn_check_overtemp, \
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
-static struct ixgbe_phy_operations phy_ops_X550 = {
+static const struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
};
-static struct ixgbe_phy_operations phy_ops_X550EM_x = {
+static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
.read_i2c_combined = &ixgbe_read_i2c_combined_generic,
.write_i2c_combined = &ixgbe_write_i2c_combined_generic,
.read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
@@ -2422,6 +2991,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = {
&ixgbe_write_i2c_combined_generic_unlocked,
};
+static const struct ixgbe_phy_operations phy_ops_x550em_a = {
+ X550_COMMON_PHY
+ .init = &ixgbe_init_phy_ops_X550em,
+ .identify = &ixgbe_identify_phy_x550em,
+ .read_reg = &ixgbe_read_phy_reg_x550a,
+ .write_reg = &ixgbe_write_phy_reg_x550a,
+};
+
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550)
};
@@ -2430,7 +3007,11 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_x)
};
-struct ixgbe_info ixgbe_X550_info = {
+static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550EM_a)
+};
+
+const struct ixgbe_info ixgbe_X550_info = {
.mac = ixgbe_mac_X550,
.get_invariants = &ixgbe_get_invariants_X540,
.mac_ops = &mac_ops_X550,
@@ -2440,7 +3021,7 @@ struct ixgbe_info ixgbe_X550_info = {
.mvals = ixgbe_mvals_X550,
};
-struct ixgbe_info ixgbe_X550EM_x_info = {
+const struct ixgbe_info ixgbe_X550EM_x_info = {
.mac = ixgbe_mac_X550EM_x,
.get_invariants = &ixgbe_get_invariants_X550_x,
.mac_ops = &mac_ops_X550EM_x,
@@ -2449,3 +3030,13 @@ struct ixgbe_info ixgbe_X550EM_x_info = {
.mbx_ops = &mbx_ops_generic,
.mvals = ixgbe_mvals_X550EM_x,
};
+
+const struct ixgbe_info ixgbe_x550em_a_info = {
+ .mac = ixgbe_mac_x550em_a,
+ .get_invariants = &ixgbe_get_invariants_X550_x,
+ .mac_ops = &mac_ops_x550em_a,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_x550em_a,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
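A note on the constification above: the ops tables that are fully known at build time (mac_ops_X550, mac_ops_X550EM_x, the eeprom and phy tables) gain const so their function pointers land in .rodata, while mac_ops_x550em_a stays writable because its .setup_link is still filled in at init time ("defined later"). A minimal sketch of that distinction, using a hypothetical demo_ops struct rather than the driver's own:

	/* Sketch only; demo_ops and demo_reset are hypothetical. */
	struct demo_ops {
		int (*reset)(void);
	};

	static int demo_reset(void) { return 0; }

	/* Fully known at build time: const places the pointer table in
	 * .rodata, so a stray (or hostile) write faults instead of
	 * silently redirecting an indirect call.
	 */
	static const struct demo_ops ops_ro = { .reset = demo_reset };

	/* Patched during probe, like mac_ops_x550em_a's .setup_link:
	 * must remain writable, hence no const.
	 */
	static struct demo_ops ops_rw = { .reset = demo_reset };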
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 58434584b..ae09d60e7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -33,6 +33,11 @@
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
#define IXGBE_VF_MAX_RX_QUEUES 8
@@ -74,7 +79,7 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
/* DCA Control */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
/* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -296,16 +301,16 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
-
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
#endif /* _IXGBEVF_DEFINES_H_ */
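For reference, the (1 << n) to BIT(n) conversions in this header rely on the kernel's bit helper; paraphrasing include/linux/bitops.h, the equivalence being assumed is:

	/* BIT(nr) shifts an unsigned long, so unlike a bare (1 << nr) it
	 * cannot hit undefined behaviour by shifting a signed int into
	 * the sign bit (e.g. bit 31 of a 32-bit int).
	 */
	#define BIT(nr) (1UL << (nr))

	/* Same bit either way; only type and UB-safety differ:
	 *   (1 << 11)  ->  signed int
	 *   BIT(11)    ->  unsigned long
	 */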
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index d7aa4b203..508e72c5f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -42,65 +42,54 @@
#define IXGBE_ALL_RAR_ENTRIES 16
+enum {NETDEV_STATS, IXGBEVF_STATS};
+
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
- struct {
- int sizeof_stat;
- int stat_offset;
- int base_stat_offset;
- int saved_reset_offset;
- };
+ int type;
+ int sizeof_stat;
+ int stat_offset;
};
-#define IXGBEVF_STAT(m, b, r) { \
- .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
- .stat_offset = offsetof(struct ixgbevf_adapter, m), \
- .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
- .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+#define IXGBEVF_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .type = IXGBEVF_STATS, \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}
-#define IXGBEVF_ZSTAT(m) { \
- .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
- .stat_offset = offsetof(struct ixgbevf_adapter, m), \
- .base_stat_offset = -1, \
- .saved_reset_offset = -1 \
+#define IXGBEVF_NETDEV_STAT(_net_stat) { \
+ .stat_string = #_net_stat, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+ .stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
-static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
- {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
- stats.saved_reset_vfgprc)},
- {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
- stats.saved_reset_vfgptc)},
- {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
- stats.saved_reset_vfgorc)},
- {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
- stats.saved_reset_vfgotc)},
- {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
- {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
- {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
- {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
- stats.saved_reset_vfmprc)},
- {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-#ifdef BP_EXTENDED_STATS
- {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
- {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
- {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
- {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
- {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
- {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
-#endif
+static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
+ IXGBEVF_NETDEV_STAT(rx_packets),
+ IXGBEVF_NETDEV_STAT(tx_packets),
+ IXGBEVF_NETDEV_STAT(rx_bytes),
+ IXGBEVF_NETDEV_STAT(tx_bytes),
+ IXGBEVF_STAT("tx_busy", tx_busy),
+ IXGBEVF_STAT("tx_restart_queue", restart_queue),
+ IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
+ IXGBEVF_NETDEV_STAT(multicast),
+ IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
};
-#define IXGBE_QUEUE_STATS_LEN 0
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBEVF_QUEUE_STATS_LEN ( \
+ (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+ ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+ (sizeof(struct ixgbe_stats) / sizeof(u64)))
+#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
-#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
@@ -177,7 +166,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
memset(p, 0, regs_len);
- regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+ /* generate a number suitable for ethtool's register version */
+ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
@@ -392,13 +382,13 @@ clear_reset:
return err;
}
-static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
switch (stringset) {
case ETH_SS_TEST:
- return IXGBE_TEST_LEN;
+ return IXGBEVF_TEST_LEN;
case ETH_SS_STATS:
- return IXGBE_GLOBAL_STATS_LEN;
+ return IXGBEVF_STATS_LEN;
default:
return -EINVAL;
}
@@ -408,70 +398,138 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- char *base = (char *)adapter;
- int i;
-#ifdef BP_EXTENDED_STATS
- u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
- tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *net_stats;
+ unsigned int start;
+ struct ixgbevf_ring *ring;
+ int i, j;
+ char *p;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_yields += adapter->rx_ring[i]->stats.yields;
- rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
- rx_yields += adapter->rx_ring[i]->stats.yields;
- }
+ ixgbevf_update_stats(adapter);
+ net_stats = dev_get_stats(netdev, &temp);
+ for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+ switch (ixgbevf_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *)net_stats +
+ ixgbevf_gstrings_stats[i].stat_offset;
+ break;
+ case IXGBEVF_STATS:
+ p = (char *)adapter +
+ ixgbevf_gstrings_stats[i].stat_offset;
+ break;
+ default:
+ data[i] = 0;
+ continue;
+ }
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_yields += adapter->tx_ring[i]->stats.yields;
- tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
- tx_yields += adapter->tx_ring[i]->stats.yields;
+ data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- adapter->bp_rx_yields = rx_yields;
- adapter->bp_rx_cleaned = rx_cleaned;
- adapter->bp_rx_missed = rx_missed;
+ /* populate Tx queue data */
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ ring = adapter->tx_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+ data[i++] = 0;
+ data[i++] = 0;
+ data[i++] = 0;
+#endif
+ continue;
+ }
- adapter->bp_tx_yields = tx_yields;
- adapter->bp_tx_cleaned = tx_cleaned;
- adapter->bp_tx_missed = tx_missed;
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ i += 2;
+#ifdef BP_EXTENDED_STATS
+ data[i] = ring->stats.yields;
+ data[i + 1] = ring->stats.misses;
+ data[i + 2] = ring->stats.cleaned;
+ i += 3;
#endif
+ }
- ixgbevf_update_stats(adapter);
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = base + ixgbe_gstrings_stats[i].stat_offset;
- char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
- char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
-
- if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
- if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
- data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
- else
- data[i] = *(u64 *)p;
- } else {
- if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
- data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
- else
- data[i] = *(u32 *)p;
+ /* populate Rx queue data */
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ ring = adapter->rx_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+ data[i++] = 0;
+ data[i++] = 0;
+ data[i++] = 0;
+#endif
+ continue;
}
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ i += 2;
+#ifdef BP_EXTENDED_STATS
+ data[i] = ring->stats.yields;
+ data[i + 1] = ring->stats.misses;
+ data[i + 2] = ring->stats.cleaned;
+ i += 3;
+#endif
}
}
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
char *p = (char *)data;
int i;
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *ixgbe_gstrings_test,
- IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bp_misses", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bp_cleaned", i);
+ p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bp_misses", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bp_cleaned", i);
+ p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+ }
break;
}
}
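The per-ring loops in ixgbevf_get_ethtool_stats() above use the u64_stats seqcount so that 64-bit packet/byte counters read tear-free on 32-bit hosts; a minimal reader-side sketch of the pattern, assuming a ring whose syncp has been initialised with u64_stats_init():

	unsigned int start;
	u64 packets, bytes;

	/* Retry if a writer updated the counters mid-read; on 64-bit
	 * builds the begin/retry pair compiles away entirely.
	 */
	do {
		start = u64_stats_fetch_begin_irq(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_irq(&ring->syncp, start));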
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 991eeae81..d5944c391 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -166,10 +166,10 @@ struct ixgbevf_ring {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_CSUM BIT(0)
+#define IXGBE_TX_FLAGS_VLAN BIT(1)
+#define IXGBE_TX_FLAGS_TSO BIT(2)
+#define IXGBE_TX_FLAGS_IPV4 BIT(3)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -403,13 +403,6 @@ struct ixgbevf_adapter {
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
- /* Some features need tri-state capability,
- * thus the additional *_CAPABLE flags.
- */
- u32 flags;
-#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1)
-#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
-
struct msix_entry *msix_entries;
/* OS defined structs */
@@ -429,16 +422,6 @@ struct ixgbevf_adapter {
unsigned int tx_ring_count;
unsigned int rx_ring_count;
-#ifdef BP_EXTENDED_STATS
- u64 bp_rx_yields;
- u64 bp_rx_cleaned;
- u64 bp_rx_missed;
-
- u64 bp_tx_yields;
- u64 bp_tx_cleaned;
- u64 bp_tx_missed;
-#endif
-
u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 link_speed;
bool link_up;
@@ -461,13 +444,19 @@ enum ixbgevf_state_t {
__IXGBEVF_REMOVING,
__IXGBEVF_SERVICE_SCHED,
__IXGBEVF_SERVICE_INITED,
+ __IXGBEVF_RESET_REQUESTED,
+ __IXGBEVF_QUEUE_RESET_REQUESTED,
};
enum ixgbevf_boards {
board_82599_vf,
+ board_82599_vf_hv,
board_X540_vf,
+ board_X540_vf_hv,
board_X550_vf,
+ board_X550_vf_hv,
board_X550EM_x_vf,
+ board_X550EM_x_vf_hv,
};
enum ixgbevf_xcast_modes {
@@ -482,6 +471,12 @@ extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
+extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
+
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
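Moving IXGBEVF_FLAG_RESET_REQUESTED and IXGBEVF_FLAG_QUEUE_RESET_REQUESTED out of the removed flags word and into adapter->state means a request is set and consumed with atomic bitops; the idiom the service task relies on, as a small sketch:

	/* Requester -- safe from any context, no lock required: */
	set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);

	/* Consumer -- test-and-clear is a single atomic step, so two
	 * runs of the service task cannot both observe (and act on)
	 * the same request, unlike the old read-modify-write on
	 * adapter->flags.
	 */
	if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
		return;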
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index b0edae94d..acc24010c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -62,10 +62,14 @@ static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2015 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
- [board_82599_vf] = &ixgbevf_82599_vf_info,
- [board_X540_vf] = &ixgbevf_X540_vf_info,
- [board_X550_vf] = &ixgbevf_X550_vf_info,
- [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+ [board_82599_vf] = &ixgbevf_82599_vf_info,
+ [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
+ [board_X540_vf] = &ixgbevf_X540_vf_info,
+ [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
+ [board_X550_vf] = &ixgbevf_X550_vf_info,
+ [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
+ [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+ [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -78,9 +82,13 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
*/
static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
/* required last entry */
{0, }
};
@@ -268,7 +276,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
/* Do the reset outside of interrupt context */
if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+ set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
ixgbevf_service_event_schedule(adapter);
}
}
@@ -288,9 +296,10 @@ static void ixgbevf_tx_timeout(struct net_device *netdev)
* ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: board private structure
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
- struct ixgbevf_ring *tx_ring)
+ struct ixgbevf_ring *tx_ring, int napi_budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbevf_tx_buffer *tx_buffer;
@@ -328,7 +337,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_kfree_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1013,8 +1022,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
int per_ring_budget, work_done = 0;
bool clean_complete = true;
- ixgbevf_for_each_ring(ring, q_vector->tx)
- clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+ ixgbevf_for_each_ring(ring, q_vector->tx) {
+ if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
if (budget <= 0)
return budget;
@@ -1035,7 +1046,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
per_ring_budget);
work_done += cleaned;
- clean_complete &= (cleaned < per_ring_budget);
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1052,7 +1064,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
!test_bit(__IXGBEVF_REMOVING, &adapter->state))
ixgbevf_irq_enable_queues(adapter,
- 1 << q_vector->v_idx);
+ BIT(q_vector->v_idx));
return 0;
}
@@ -1154,14 +1166,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
}
/* add q_vector eims value to global eims_enable_mask */
- adapter->eims_enable_mask |= 1 << v_idx;
+ adapter->eims_enable_mask |= BIT(v_idx);
ixgbevf_write_eitr(q_vector);
}
ixgbevf_set_ivar(adapter, -1, 1, v_idx);
/* setup eims_other and add value to global eims_enable_mask */
- adapter->eims_other = 1 << v_idx;
+ adapter->eims_other = BIT(v_idx);
adapter->eims_enable_mask |= adapter->eims_other;
}
@@ -1585,8 +1597,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
txdctl |= (8 << 16); /* WTHRESH = 8 */
/* Setting PTHRESH to 32 both improves performance */
- txdctl |= (1 << 8) | /* HTHRESH = 1 */
- 32; /* PTHRESH = 32 */
+ txdctl |= (1u << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
@@ -1642,7 +1654,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
IXGBE_PSRTYPE_L2HDR;
if (adapter->num_rx_queues > 1)
- psrtype |= 1 << 29;
+ psrtype |= BIT(29);
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
@@ -1748,9 +1760,15 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
ring->count * sizeof(union ixgbe_adv_rx_desc));
+#ifndef CONFIG_SPARC
/* enable relaxed ordering */
IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+#else
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+ IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_WRO_EN);
+#endif
/* reset head and tail pointers */
IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
@@ -1791,7 +1809,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
ixgbevf_setup_vfmrqc(adapter);
/* notify the PF of our intent to use this size of frame */
- ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
@@ -1904,7 +1922,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
spin_lock_bh(&adapter->mbx_lock);
- hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+ hw->mac.ops.update_xcast_mode(hw, xcast_mode);
/* reprogram multicast list */
hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -1984,7 +2002,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
hw->mbx.timeout = 0;
/* wait for watchdog to come around and bail us out */
- adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+ set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
}
return 0;
@@ -2052,7 +2070,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
spin_lock_bh(&adapter->mbx_lock);
while (api[idx] != ixgbe_mbox_api_unknown) {
- err = ixgbevf_negotiate_api_version(hw, api[idx]);
+ err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
if (!err)
break;
idx++;
@@ -2749,11 +2767,9 @@ static void ixgbevf_service_timer(unsigned long data)
static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
- if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
+ if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
return;
- adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
-
/* If we're already down or resetting, just bail */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -2795,7 +2811,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
struct ixgbevf_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
- eics |= 1 << i;
+ eics |= BIT(i);
}
/* Cause software interrupt to ensure rings are cleaned */
@@ -2821,7 +2837,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
/* if check for link returns error we will need to reset */
if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
- adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+ set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
link_up = false;
}
@@ -3222,11 +3238,10 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
- if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+ if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
+ &adapter->state))
return;
- adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
-
/* if interface is down do nothing */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -3271,9 +3286,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3286,49 +3310,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
-
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM |
IXGBE_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
- /* update GSO size and bytecount with header size */
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+
+ /* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 1 as index for TSO */
- mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+ mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
@@ -3337,76 +3365,55 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
+static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+ unsigned int offset = 0;
+
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+ return offset == skb_checksum_start_offset(skb);
+}
+
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 l4_hdr = 0;
- __be16 frag_off;
-
- switch (first->protocol) {
- case htons(ETH_P_IP):
- vlan_macip_lens |= skb_network_header_len(skb);
- type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
- l4_hdr = ip_hdr(skb)->protocol;
- break;
- case htons(ETH_P_IPV6):
- vlan_macip_lens |= skb_network_header_len(skb);
- l4_hdr = ipv6_hdr(skb)->nexthdr;
- if (likely(skb_network_header_len(skb) ==
- sizeof(struct ipv6hdr)))
- break;
- ipv6_skip_exthdr(skb, skb_network_offset(skb) +
- sizeof(struct ipv6hdr),
- &l4_hdr, &frag_off);
- if (unlikely(frag_off))
- l4_hdr = NEXTHDR_FRAGMENT;
- break;
- default:
- break;
- }
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto no_csum;
- switch (l4_hdr) {
- case IPPROTO_TCP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = tcp_hdrlen(skb) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_SCTP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- mss_l4len_idx = sizeof(struct sctphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_UDP:
- mss_l4len_idx = sizeof(struct udphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if (((first->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ ((first->protocol == htons(ETH_P_IPV6)) &&
+ ixgbevf_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum, l3 proto=%x, l4 proto=%x\n",
- first->protocol, l4_hdr);
- }
- skb_checksum_help(skb);
- goto no_csum;
}
-
- /* update TX checksum flag */
- first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ /* fall through */
+ default:
+ skb_checksum_help(skb);
+ goto no_csum;
}
-
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
- type_tucmd, mss_l4len_idx);
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
}
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
@@ -3442,7 +3449,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
/* use index 1 context for TSO/FSO/FCOE */
if (tx_flags & IXGBE_TX_FLAGS_TSO)
- olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+ olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
/* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running
@@ -3747,7 +3754,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
/* notify the PF of our intent to use this size of frame */
- ixgbevf_rlpml_set_vf(hw, max_frame);
+ hw->mac.ops.set_rlpml(hw, max_frame);
return 0;
}
@@ -3890,6 +3897,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
+#define IXGBEVF_MAX_MAC_HDR_LEN 127
+#define IXGBEVF_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
@@ -3908,7 +3949,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbevf_netpoll,
#endif
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = ixgbevf_features_check,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -4013,26 +4054,37 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
- netdev->vlan_features |= NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_SG;
+ netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+ IXGBEVF_GSO_PARTIAL_FEATURES;
+
+ netdev->features = netdev->hw_features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
+
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
if (IXGBE_REMOVED(hw->hw_addr)) {
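On the napi_budget plumbing earlier in this file: ixgbevf_clean_tx_irq() now frees completed skbs through napi_consume_skb(), which batches frees while polling and degrades safely under netpoll. Roughly, paraphrasing (and simplifying) the net/core/skbuff.c helper:

	void napi_consume_skb(struct sk_buff *skb, int budget)
	{
		if (unlikely(!skb))
			return;

		/* budget == 0 flags a non-NAPI caller such as netpoll;
		 * fall back to the any-context free.
		 */
		if (unlikely(!budget)) {
			dev_consume_skb_any(skb);
			return;
		}

		/* ...otherwise the skb goes onto a per-cpu batch and is
		 * freed in bulk, amortising the cost across the poll.
		 */
	}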
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index dc68fea48..2819abc45 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
if (!mbx->ops.read)
goto out;
@@ -111,7 +111,7 @@ out:
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
@@ -346,3 +346,14 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.check_for_rst = ixgbevf_check_for_rst_vf,
};
+/* Mailbox operations when running on Hyper-V.
+ * On Hyper-V, PF/VF communication does not go through the
+ * hardware mailbox; it is mediated through a software path
+ * instead. Most mailbox operations are therefore no-ops
+ * while running on Hyper-V.
+ */
+const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
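Leaving most function pointers unset in ixgbevf_hv_mbx_ops is safe because the posted-mailbox helpers earlier in this file null-check the op before dereferencing it, as in ixgbevf_read_posted_mbx():

	/* On Hyper-V ops.read is NULL, so the call simply falls
	 * through and returns IXGBE_ERR_MBX.
	 */
	s32 ret_val = IXGBE_ERR_MBX;

	if (!mbx->ops.read)
		goto out;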
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 4d613a4f2..e670d3b19 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -27,6 +27,12 @@
#include "vf.h"
#include "ixgbevf.h"
+/* On Hyper-V, to reset, we need to read from this offset in the
+ * PCI config space. This is the mechanism used on Hyper-V to
+ * support PF/VF communication.
+ */
+#define IXGBE_HV_RESET_OFFSET 0x201
+
/**
* ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
@@ -126,6 +132,27 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
}
/**
+ * Hyper-V variant; the VF/PF communication is through the PCI
+ * config space.
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
+ struct ixgbevf_adapter *adapter = hw->back;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ pci_read_config_byte(adapter->pdev,
+ (i + IXGBE_HV_RESET_OFFSET),
+ &hw->mac.perm_addr[i]);
+ return 0;
+#else
+ pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
+ return -EOPNOTSUPP;
+#endif
+}
+
+/**
* ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
*
@@ -258,6 +285,11 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
return ret_val;
}
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ return -EOPNOTSUPP;
+}
+
/**
* ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
* @adapter: pointer to the port handle
@@ -416,6 +448,26 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
return ret_val;
}
+/**
+ * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ *
+ * We don't really allow setting the device MAC address. However,
+ * if the address being set is the permanent MAC address we will
+ * permit that.
+ **/
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
+{
+ if (ether_addr_equal(addr, hw->mac.perm_addr))
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
u32 *msg, u16 size)
{
@@ -473,15 +525,22 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
}
/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+ struct net_device *netdev)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_update_xcast_mode - Update Multicast mode
* @hw: pointer to the HW structure
- * @netdev: pointer to net device structure
* @xcast_mode: new multicast mode
*
* Updates the Multicast Mode of VF.
**/
-static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
- struct net_device *netdev, int xcast_mode)
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
@@ -513,6 +572,14 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
}
/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -551,6 +618,15 @@ mbx_err:
}
/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_setup_mac_link_vf - Setup MAC link settings
* @hw: pointer to hardware structure
* @speed: Unused in this implementation
@@ -656,11 +732,72 @@ out:
}
/**
- * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 links_reg;
+
+ /* If we were hit with a reset, drop the link */
+ if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down there is no point in checking if the PF is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ udelay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return 0;
+}
+
+/**
+ * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+static void ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
@@ -670,11 +807,30 @@ void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
}
/**
- * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static void ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 reg;
+
+ /* If we are on Hyper-V, we implement this functionality
+ * differently.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+ /* CRC == 4 */
+ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+}
+
+/**
+ * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
* @hw: pointer to the HW structure
* @api: integer containing requested API version
**/
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
int err;
u32 msg[3];
@@ -703,6 +859,21 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
return err;
}
+/**
+ * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ * Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+ /* Hyper-V only supports api version ixgbe_mbox_api_10 */
+ if (api != ixgbe_mbox_api_10)
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ return 0;
+}
+
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc)
{
@@ -769,11 +940,30 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.stop_adapter = ixgbevf_stop_hw_vf,
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
+ .negotiate_api_version = ixgbevf_negotiate_api_version_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
+ .set_rlpml = ixgbevf_set_rlpml_vf,
+};
+
+static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_hv_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_hv_check_mac_link_vf,
+ .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
+ .set_rar = ixgbevf_hv_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
+ .update_xcast_mode = ixgbevf_hv_update_xcast_mode,
+ .set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
+ .set_vfta = ixgbevf_hv_set_vfta_vf,
+ .set_rlpml = ixgbevf_hv_set_rlpml_vf,
};
const struct ixgbevf_info ixgbevf_82599_vf_info = {
@@ -781,17 +971,37 @@ const struct ixgbevf_info ixgbevf_82599_vf_info = {
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
+ .mac = ixgbe_mac_X540_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X550_vf_info = {
.mac = ixgbe_mac_X550_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
+ .mac = ixgbe_mac_X550_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
.mac = ixgbe_mac_X550EM_x_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
+ .mac = ixgbe_mac_X550EM_x_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index ef9f7736b..2cac610f3 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -51,6 +51,7 @@ struct ixgbe_mac_operations {
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
+ s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -63,11 +64,12 @@ struct ixgbe_mac_operations {
s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
+ s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ void (*set_rlpml)(struct ixgbe_hw *, u16);
};
enum ixgbe_mac_type {
@@ -207,8 +209,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 3ddf657bc..836ebd8ee 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -222,7 +222,7 @@ jme_clear_ghc_reset(struct jme_adapter *jme)
jwrite32f(jme, JME_GHC, jme->reg_ghc);
}
-static inline void
+static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d74f5f4e5..1799fe141 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -152,7 +152,7 @@ static inline void korina_abort_dma(struct net_device *dev,
writel(0x10, &ch->dmac);
while (!(readl(&ch->dmas) & DMA_STAT_HALT))
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
writel(0, &ch->dmas);
}
@@ -283,7 +283,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
}
dma_cache_wback((u32) td, sizeof(*td));
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
@@ -622,7 +622,7 @@ korina_tx_dma_interrupt(int irq, void *dev_id)
&(lp->tx_dma_regs->dmandptr));
lp->tx_chain_status = desc_empty;
lp->tx_chain_head = lp->tx_chain_tail;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
if (dmas & DMA_STAT_ERR)
printk(KERN_ERR "%s: DMA error\n", dev->name);
@@ -811,7 +811,7 @@ static int korina_init(struct net_device *dev)
/* reset ethernet logic */
writel(0, &lp->eth_regs->ethintfc);
while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* Enable Ethernet Interface */
writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
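The dev->trans_start = jiffies to netif_trans_update(dev) conversions here and in the following drivers go through a helper that stamps the Tx watchdog timestamp on queue 0; paraphrasing the include/linux/netdevice.h helper this series assumes:

	static inline void netif_trans_update(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* avoid dirtying a shared cacheline when unchanged */
		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;
	}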
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index b630ef1e9..dc82b1b19 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -519,7 +519,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
byte_offset = CPHYSADDR(skb->data) % 16;
ch->skb[ch->dma.desc] = skb;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_lock_irqsave(&priv->lock, flags);
desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
@@ -657,7 +657,7 @@ ltq_etop_tx_timeout(struct net_device *dev)
err = ltq_etop_hw_init(dev);
if (err)
goto err_hw;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
return;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a6d26d351..f92018b13 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -244,7 +244,7 @@
/* Various constants */
/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 1
+#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
return 0;
err_free_irq:
+ unregister_cpu_notifier(&pp->cpu_notifier);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index c442f6ad1..54d5154ac 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -286,12 +286,12 @@ static int pxa168_eth_stop(struct net_device *dev);
static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
- return readl(pep->base + offset);
+ return readl_relaxed(pep->base + offset);
}
static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
- writel(data, pep->base + offset);
+ writel_relaxed(data, pep->base + offset);
}
static void abort_dma(struct pxa168_eth_private *pep)
@@ -342,9 +342,9 @@ static void rxq_refill(struct net_device *dev)
pep->rx_skb[used_rx_desc] = skb;
/* Return the descriptor to DMA ownership */
- wmb();
+ dma_wmb();
p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
- wmb();
+ dma_wmb();
/* Move the used descriptor pointer to the next descriptor */
pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
@@ -794,7 +794,7 @@ static int rxq_process(struct net_device *dev, int budget)
rx_used_desc = pep->rx_used_desc_q;
rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
cmd_sts = rx_desc->cmd_sts;
- rmb();
+ dma_rmb();
if (cmd_sts & (BUF_OWNED_BY_DMA))
break;
skb = pep->rx_skb[rx_curr_desc];
@@ -981,8 +981,6 @@ static int pxa168_init_phy(struct net_device *dev)
pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
if (IS_ERR(pep->phy))
return PTR_ERR(pep->phy);
- if (!pep->phy)
- return -ENODEV;
err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link,
pep->phy_intf);
@@ -1289,7 +1287,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
- wmb();
+ dma_wmb();
desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
wmb();
@@ -1297,7 +1295,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_bytes += length;
stats->tx_packets++;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
/* We handled the current skb, but now we are out of space.*/
netif_stop_queue(dev);
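The pxa168 hunks narrow the barriers: dma_wmb()/dma_rmb() order only CPU accesses to the coherent descriptor memory (cheaper than full wmb()/rmb() on ARM), and readl_relaxed()/writel_relaxed() drop ordering the register accessors do not need here. A sketch of the ownership-bit handoff these barriers protect; the foo_desc layout and FOO_OWN bit are illustrative stand-ins for the driver's descriptor and BUF_OWNED_BY_DMA:

    #include <linux/types.h>
    #include <asm/barrier.h>

    #define FOO_OWN (1u << 31)      /* stands in for BUF_OWNED_BY_DMA */

    struct foo_desc {
            u32 cmd_sts;            /* ownership bit lives here */
            u32 buf_ptr;
    };

    static void foo_give_to_hw(struct foo_desc *desc, u32 buf)
    {
            desc->buf_ptr = buf;
            dma_wmb();              /* payload visible before ownership flips */
            desc->cmd_sts = FOO_OWN;
    }

    static bool foo_hw_done(const struct foo_desc *desc)
    {
            if (desc->cmd_sts & FOO_OWN)
                    return false;   /* still owned by the DMA engine */
            dma_rmb();              /* read ownership before the payload */
            return true;
    }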
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ec0a22119..467138b42 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2418,7 +2418,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write32(hw, B0_IMSK, 0);
sky2_read32(hw, B0_IMSK);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
napi_disable(&hw->napi);
netif_tx_disable(dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e0b68afea..d1cdc2d76 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 flowctrl;
u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (mac->phy_dev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex)
+ if (mac->phy_dev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
- mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+ if (mac->phy_dev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (mac->phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ mcr |= MAC_MCR_FORCE_TX_FC;
+ if (flowctrl & FLOW_CTRL_RX)
+ mcr |= MAC_MCR_FORCE_RX_FC;
+
+ netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+ flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+ flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+ }
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
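Rather than forcing both flow-control directions whenever the link partner signals pause, the rewritten block resolves local and remote pause advertisements into per-direction flags. A condensed sketch of that resolution, assuming a phylib PHY; foo_resolve_pause() is not a function from this patch:

    #include <linux/mii.h>
    #include <linux/phy.h>

    /* Sketch: map negotiated pause bits to FLOW_CTRL_TX/FLOW_CTRL_RX
     * for a full-duplex link, as the hunk above does.
     */
    static u8 foo_resolve_pause(struct phy_device *phydev)
    {
            u16 lcl_adv = 0, rmt_adv = 0;

            if (phydev->pause)
                    rmt_adv = LPA_PAUSE_CAP;
            if (phydev->asym_pause)
                    rmt_adv |= LPA_PAUSE_ASYM;

            if (phydev->advertising & ADVERTISED_Pause)
                    lcl_adv |= ADVERTISE_PAUSE_CAP;
            if (phydev->advertising & ADVERTISED_Asym_Pause)
                    lcl_adv |= ADVERTISE_PAUSE_ASYM;

            return mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
    }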
@@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
u32 val, ge_mode;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+ if (!np && of_phy_is_fixed_link(mac->of_node))
+ if (!of_phy_register_fixed_link(mac->of_node))
+ np = of_node_get(mac->of_node);
if (!np)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
ge_mode = 0;
break;
@@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac)
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
- mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+ mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
@@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth)
return 0;
err_free_bus:
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
err_put_node:
of_node_put(mii_np);
@@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
mdiobus_unregister(eth->mii_bus);
of_node_put(eth->mii_bus->dev.of_node);
- kfree(eth->mii_bus);
+ mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
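An mii_bus obtained from mdiobus_alloc() is a refcounted device and must be released through mdiobus_free(); kfree() bypasses the embedded struct device and its release path. A sketch of the matching pairs, with foo_* as placeholder names:

    #include <linux/phy.h>

    /* Sketch: alloc/register and unregister/free must pair up. */
    static struct mii_bus *foo_mdio_init(struct device *parent)
    {
            struct mii_bus *bus = mdiobus_alloc();

            if (!bus)
                    return NULL;

            bus->parent = parent;
            /* ... set bus->name, bus->id, bus->read, bus->write ... */

            if (mdiobus_register(bus)) {
                    mdiobus_free(bus);      /* not kfree() */
                    return NULL;
            }
            return bus;
    }

    static void foo_mdio_exit(struct mii_bus *bus)
    {
            mdiobus_unregister(bus);
            mdiobus_free(bus);
    }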
@@ -453,20 +481,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
- dma_addr_t phy_ring_head, phy_ring_tail;
+ dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
eth->scratch_ring = dma_alloc_coherent(eth->dev,
cnt * sizeof(struct mtk_tx_dma),
- &phy_ring_head,
+ &eth->phy_scratch_ring,
GFP_ATOMIC | __GFP_ZERO);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
GFP_KERNEL);
+ if (unlikely(!eth->scratch_head))
+ return -ENOMEM;
+
dma_addr = dma_map_single(eth->dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
@@ -474,19 +505,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
return -ENOMEM;
memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
- phy_ring_tail = phy_ring_head +
+ phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
for (i = 0; i < cnt; i++) {
eth->scratch_ring[i].txd1 =
(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (phy_ring_head +
+ eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
((i + 1) * sizeof(struct mtk_tx_dma)));
eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
}
- mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+ mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
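The fix stores the ring's bus address in eth->phy_scratch_ring because dma_free_coherent() must be called with exactly the handle dma_alloc_coherent() returned; the old stack-local phy_ring_head was gone by teardown time, leaking the ring. A minimal sketch of the keep-the-handle rule, with illustrative foo_* names:

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    struct foo_priv {
            struct device *dev;
            void *ring;             /* CPU address of the ring */
            dma_addr_t ring_dma;    /* bus address, kept for freeing */
            size_t ring_size;
    };

    static int foo_ring_alloc(struct foo_priv *p)
    {
            p->ring = dma_alloc_coherent(p->dev, p->ring_size,
                                         &p->ring_dma, GFP_KERNEL);
            return p->ring ? 0 : -ENOMEM;
    }

    static void foo_ring_free(struct foo_priv *p)
    {
            if (p->ring) {
                    dma_free_coherent(p->dev, p->ring_size,
                                      p->ring, p->ring_dma);
                    p->ring = NULL;
            }
    }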
@@ -536,7 +567,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
struct mtk_eth *eth = mac->hw;
struct mtk_tx_dma *itxd, *txd;
struct mtk_tx_buf *tx_buf;
- unsigned long flags;
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
@@ -568,11 +598,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
return -ENOMEM;
- /* normally we can rely on the stack not calling this more than once,
- * however we have 2 queues running ont he same ring so we need to lock
- * the ring access
- */
- spin_lock_irqsave(&eth->page_lock, flags);
WRITE_ONCE(itxd->txd1, mapped_addr);
tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -609,8 +634,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(txd->txd1, mapped_addr);
WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
TX_DMA_PLEN0(frag_map_size) |
- last_frag * TX_DMA_LS0) |
- mac->id);
+ last_frag * TX_DMA_LS0));
WRITE_ONCE(txd->txd4, 0);
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
@@ -632,8 +656,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
(!nr_frags * TX_DMA_LS0)));
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
netdev_sent_queue(dev, skb->len);
skb_tx_timestamp(skb);
@@ -652,7 +674,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
mtk_tx_unmap(&dev->dev, tx_buf);
@@ -661,8 +683,6 @@ err_dma:
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
} while (itxd != txd);
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
return -ENOMEM;
}
@@ -681,7 +701,43 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
nfrags += skb_shinfo(skb)->nr_frags;
}
- return DIV_ROUND_UP(nfrags, 2);
+ return nfrags;
+}
+
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ if (netif_queue_stopped(eth->netdev[i]))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_wake_queue(eth->netdev[i]);
+ }
+}
+
+static void mtk_stop_queue(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_stop_queue(eth->netdev[i]);
+ }
}
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -690,14 +746,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mtk_eth *eth = mac->hw;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct net_device_stats *stats = &dev->stats;
+ unsigned long flags;
bool gso = false;
int tx_num;
+ /* normally we can rely on the stack not calling this more than once,
+ * however we have 2 queues running on the same ring so we need to lock
+ * the ring access
+ */
+ spin_lock_irqsave(&eth->page_lock, flags);
+
tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
- netif_stop_queue(dev);
+ mtk_stop_queue(eth);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
+ spin_unlock_irqrestore(&eth->page_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -719,16 +783,15 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
goto drop;
- if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
- netif_stop_queue(dev);
- if (unlikely(atomic_read(&ring->free_count) >
- ring->thresh))
- netif_wake_queue(dev);
- }
+ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
+ mtk_stop_queue(eth);
+
+ spin_unlock_irqrestore(&eth->page_lock, flags);
return NETDEV_TX_OK;
drop:
+ spin_unlock_irqrestore(&eth->page_lock, flags);
stats->tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
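The lock that used to sit inside mtk_tx_map() now spans the whole transmit path, covering the free-slot check as well, because two net_devices feed the same DMA ring and the new mtk_stop_queue()/mtk_wake_queue() helpers must act on both. A reduced sketch of that shape; struct foo_eth and the foo_* helpers are placeholders:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    struct foo_eth {
            spinlock_t ring_lock;           /* guards the shared DMA ring */
            struct net_device *netdev[2];   /* two MACs, one ring */
    };

    struct foo_eth *foo_netdev_to_eth(struct net_device *dev);  /* placeholders */
    int foo_ring_free_slots(struct foo_eth *eth);
    int foo_slots_needed(struct sk_buff *skb);
    void foo_queue_frame(struct foo_eth *eth, struct sk_buff *skb);
    void foo_stop_all_queues(struct foo_eth *eth);

    /* Sketch: the lock spans the capacity check and the descriptor
     * writes so the two netdevs cannot interleave on the ring.
     */
    static netdev_tx_t foo_start_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
    {
            struct foo_eth *eth = foo_netdev_to_eth(dev);
            unsigned long flags;

            spin_lock_irqsave(&eth->ring_lock, flags);
            if (foo_ring_free_slots(eth) < foo_slots_needed(skb)) {
                    foo_stop_all_queues(eth);
                    spin_unlock_irqrestore(&eth->ring_lock, flags);
                    return NETDEV_TX_BUSY;
            }
            foo_queue_frame(eth, skb);
            spin_unlock_irqrestore(&eth->ring_lock, flags);
            return NETDEV_TX_OK;
    }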
@@ -777,6 +840,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
goto release_desc;
}
@@ -784,6 +848,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
put_page(virt_to_head_page(new_data));
+ netdev->stats.rx_dropped++;
goto release_desc;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
@@ -872,7 +937,6 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
}
mtk_tx_unmap(eth->dev, tx_buf);
- ring->last_free->txd2 = next_cpu;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -897,13 +961,9 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
if (!total)
return 0;
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i] ||
- unlikely(!netif_queue_stopped(eth->netdev[i])))
- continue;
- if (atomic_read(&ring->free_count) > ring->thresh)
- netif_wake_queue(eth->netdev[i]);
- }
+ if (mtk_queue_stopped(eth) &&
+ (atomic_read(&ring->free_count) > ring->thresh))
+ mtk_wake_queue(eth);
return total;
}
@@ -983,9 +1043,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
- ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
- MAX_SKB_FRAGS);
+ ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
* continue
@@ -1163,6 +1222,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
for (i = 0; i < MTK_MAC_COUNT; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
+ if (eth->scratch_ring) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring,
+ eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+ }
mtk_tx_clean(eth);
mtk_rx_clean(eth);
kfree(eth->scratch_head);
@@ -1176,7 +1243,7 @@ static void mtk_tx_timeout(struct net_device *dev)
eth->netdev[mac->id]->stats.tx_errors++;
netif_err(eth, tx_err, dev,
"transmit timed out\n");
- schedule_work(&mac->pending_work);
+ schedule_work(&eth->pending_work);
}
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
@@ -1225,7 +1292,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
mtk_w32(eth,
MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
- MTK_RX_BT_32DWORDS,
+ MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
MTK_QDMA_GLO_CFG);
return 0;
@@ -1339,7 +1406,7 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_irq_disable(eth, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
@@ -1413,19 +1480,30 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void mtk_pending_work(struct work_struct *work)
{
- struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
- struct mtk_eth *eth = mac->hw;
- struct net_device *dev = eth->netdev[mac->id];
- int err;
+ struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+ int err, i;
+ unsigned long restart = 0;
rtnl_lock();
- mtk_stop(dev);
- err = mtk_open(dev);
- if (err) {
- netif_alert(eth, ifup, dev,
- "Driver up/down cycle failed, closing device.\n");
- dev_close(dev);
+ /* stop all devices to make sure that dma is properly shut down */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ mtk_stop(eth->netdev[i]);
+ __set_bit(i, &restart);
+ }
+
+ /* restart DMA and enable IRQs */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!test_bit(i, &restart))
+ continue;
+ err = mtk_open(eth->netdev[i]);
+ if (err) {
+ netif_alert(eth, ifup, eth->netdev[i],
+ "Driver up/down cycle failed, closing device.\n");
+ dev_close(eth->netdev[i]);
+ }
}
rtnl_unlock();
}
@@ -1435,15 +1513,13 @@ static int mtk_cleanup(struct mtk_eth *eth)
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
- struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
if (!eth->netdev[i])
continue;
unregister_netdev(eth->netdev[i]);
free_netdev(eth->netdev[i]);
- cancel_work_sync(&mac->pending_work);
}
+ cancel_work_sync(&eth->pending_work);
return 0;
}
@@ -1631,7 +1707,6 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->id = id;
mac->hw = eth;
mac->of_node = np;
- INIT_WORK(&mac->pending_work, mtk_pending_work);
mac->hw_stats = devm_kzalloc(eth->dev,
sizeof(*mac->hw_stats),
@@ -1645,6 +1720,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+ eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
@@ -1678,10 +1754,6 @@ static int mtk_probe(struct platform_device *pdev)
struct mtk_eth *eth;
int err;
- err = device_reset(&pdev->dev);
- if (err)
- return err;
-
match = of_match_device(of_mtk_match, &pdev->dev);
soc = (struct mtk_soc_data *)match->data;
@@ -1736,6 +1808,7 @@ static int mtk_probe(struct platform_device *pdev)
eth->dev = &pdev->dev;
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+ INIT_WORK(&eth->pending_work, mtk_pending_work);
err = mtk_hw_init(eth);
if (err)
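Moving pending_work from struct mtk_mac to struct mtk_eth turns the TX-timeout reset into a device-level operation: one work item quiesces every MAC sharing the DMA engine, then reopens them under rtnl_lock. A sketch of the pattern, with FOO_MAC_COUNT and the foo_* helpers as placeholders:

    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>
    #include <linux/workqueue.h>

    #define FOO_MAC_COUNT 2

    struct foo_eth {
            struct work_struct pending_work;
            struct net_device *netdev[FOO_MAC_COUNT];
    };

    int foo_stop(struct net_device *dev);   /* placeholders */
    int foo_open(struct net_device *dev);

    static void foo_reset_work(struct work_struct *work)
    {
            struct foo_eth *eth = container_of(work, struct foo_eth,
                                               pending_work);
            int i;

            rtnl_lock();
            for (i = 0; i < FOO_MAC_COUNT; i++)     /* quiesce all MACs */
                    if (eth->netdev[i])
                            foo_stop(eth->netdev[i]);
            for (i = 0; i < FOO_MAC_COUNT; i++)     /* then restart them */
                    if (eth->netdev[i])
                            foo_open(eth->netdev[i]);
            rtnl_unlock();
    }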
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 48a5292c8..a5eb7c623 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -91,6 +91,7 @@
#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
+#define MTK_NDP_CO_PRO BIT(10)
#define MTK_TX_WB_DDONE BIT(6)
#define MTK_DMA_SIZE_16DWORDS (2 << 4)
#define MTK_RX_DMA_BUSY BIT(3)
@@ -357,12 +358,14 @@ struct mtk_rx_ring {
* @rx_ring: Pointer to the memory holding info about the RX ring
* @rx_napi: The NAPI struct
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
* @clk_ethif: The ethif clock
* @clk_esw: The switch clock
* @clk_gp1: The gmac1 clock
* @clk_gp2: The gmac2 clock
* @mii_bus: If there is a bus we need to create an instance for it
+ * @pending_work: The workqueue used to reset the dma ring
*/
struct mtk_eth {
@@ -383,12 +386,14 @@ struct mtk_eth {
struct mtk_rx_ring rx_ring;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
+ dma_addr_t phy_scratch_ring;
void *scratch_head;
struct clk *clk_ethif;
struct clk *clk_esw;
struct clk *clk_gp1;
struct clk *clk_gp2;
struct mii_bus *mii_bus;
+ struct work_struct pending_work;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
@@ -398,7 +403,6 @@ struct mtk_eth {
* @hw: Backpointer to our main data structure
* @hw_stats: Packet statistics counter
* @phy_dev: The attached PHY if available
- * @pending_work: The workqueue used to reset the dma ring
*/
struct mtk_mac {
int id;
@@ -406,7 +410,6 @@ struct mtk_mac {
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
struct phy_device *phy_dev;
- struct work_struct pending_work;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 0c51c69f8..249a45844 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -576,41 +576,48 @@ out:
return res;
}
-/*
- * Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0. If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
- */
-int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp)
+static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
+ struct mlx4_buf *buf, gfp_t gfp)
{
dma_addr_t t;
- if (size <= max_direct) {
- buf->nbufs = 1;
- buf->npages = 1;
- buf->page_shift = get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
- size, &t, gfp);
- if (!buf->direct.buf)
- return -ENOMEM;
+ buf->nbufs = 1;
+ buf->npages = 1;
+ buf->page_shift = get_order(size) + PAGE_SHIFT;
+ buf->direct.buf =
+ dma_zalloc_coherent(&dev->persist->pdev->dev,
+ size, &t, gfp);
+ if (!buf->direct.buf)
+ return -ENOMEM;
- buf->direct.map = t;
+ buf->direct.map = t;
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
- memset(buf->direct.buf, 0, size);
+ return 0;
+}
+
+/* Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0. If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+ struct mlx4_buf *buf, gfp_t gfp)
+{
+ if (size <= max_direct) {
+ return mlx4_buf_direct_alloc(dev, size, buf, gfp);
} else {
+ dma_addr_t t;
int i;
- buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- buf->npages = buf->nbufs;
+ buf->direct.buf = NULL;
+ buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
gfp);
@@ -619,28 +626,12 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
- dma_alloc_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE,
- &t, gfp);
+ dma_zalloc_coherent(&dev->persist->pdev->dev,
+ PAGE_SIZE, &t, gfp);
if (!buf->page_list[i].buf)
goto err_free;
buf->page_list[i].map = t;
-
- memset(buf->page_list[i].buf, 0, PAGE_SIZE);
- }
-
- if (BITS_PER_LONG == 64) {
- struct page **pages;
- pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
- if (!pages)
- goto err_free;
- for (i = 0; i < buf->nbufs; ++i)
- pages[i] = virt_to_page(buf->page_list[i].buf);
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
}
}
@@ -655,15 +646,11 @@ EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
- int i;
-
- if (buf->nbufs == 1)
+ if (buf->nbufs == 1) {
dma_free_coherent(&dev->persist->pdev->dev, size,
- buf->direct.buf,
- buf->direct.map);
- else {
- if (BITS_PER_LONG == 64)
- vunmap(buf->direct.buf);
+ buf->direct.buf, buf->direct.map);
+ } else {
+ int i;
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
@@ -789,7 +776,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
EXPORT_SYMBOL_GPL(mlx4_db_free);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
- int size, int max_direct)
+ int size)
{
int err;
@@ -799,7 +786,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
*wqres->db.db = 0;
- err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
+ err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL);
if (err)
goto err_db;
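The allocation paths above switch to dma_zalloc_coherent(), which in this kernel generation wraps dma_alloc_coherent() and zeroes the buffer, making the explicit memset() calls redundant. A one-function sketch of the equivalence:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Equivalent to dma_alloc_coherent() followed by memset(buf, 0, size). */
    static void *foo_alloc_zeroed_ring(struct device *dev, size_t size,
                                       dma_addr_t *handle)
    {
            return dma_zalloc_coherent(dev, size, handle, GFP_KERNEL);
    }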
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e94ca1c3f..f04a423ff 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2597,7 +2597,6 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
priv->cmd.free_head = 0;
sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
- spin_lock_init(&priv->cmd.context_lock);
for (priv->cmd.token_mask = 1;
priv->cmd.token_mask < priv->cmd.max_cmds;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index af975a2b7..132cea655 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -73,22 +73,16 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
*/
set_dev_node(&mdev->dev->persist->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
- cq->buf_size, 2 * PAGE_SIZE);
+ cq->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_cq;
- err = mlx4_en_map_buffer(&cq->wqres.buf);
- if (err)
- goto err_res;
-
cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
*pcq = cq;
return 0;
-err_res:
- mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
err_cq:
kfree(cq);
*pcq = NULL;
@@ -177,7 +171,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq = *pcq;
- mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
cq->is_tx == RX)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c761194bb..44cf16d01 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- data[index++] = ((unsigned long *)&priv->stats)[i];
+ data[index++] = ((unsigned long *)&dev->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
@@ -1042,6 +1042,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;
@@ -1061,22 +1063,25 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
tx_size == priv->tx_ring[0]->size)
return 0;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.tx_ring_size = tx_size;
+ new_prof.rx_ring_size = rx_size;
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->prof->tx_ring_size = tx_size;
- priv->prof->rx_ring_size = rx_size;
+ mlx4_en_safe_replace_resources(priv, tmp);
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
@@ -1084,8 +1089,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
@@ -1714,6 +1719,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
@@ -1723,23 +1730,26 @@ static int mlx4_en_set_channels(struct net_device *dev,
!channel->tx_count || !channel->rx_count)
return -EINVAL;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.num_tx_rings_p_up = channel->tx_count;
+ new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.rx_ring_num = channel->rx_count;
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->num_tx_rings_p_up = channel->tx_count;
- priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
- priv->rx_ring_num = channel->rx_count;
-
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
+ mlx4_en_safe_replace_resources(priv, tmp);
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
@@ -1757,8 +1767,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
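Both ethtool paths now follow an allocate-before-free discipline: the new ring set is built in a scratch priv while the port still runs on the old one, and only a successful allocation leads to stop, swap, and restart. A condensed sketch of the pattern; the foo_* types and helpers stand in for mlx4_en_try_alloc_resources()/mlx4_en_safe_replace_resources():

    #include <linux/errno.h>

    struct foo_res;                 /* opaque ring/CQ bundle, placeholder */
    struct foo_profile;

    struct foo_priv {
            struct foo_res *res;
    };

    struct foo_res *foo_alloc_res(const struct foo_profile *prof);
    void foo_free_res(struct foo_res *res);
    void foo_stop_port(struct foo_priv *priv);
    int foo_start_port(struct foo_priv *priv);

    /* Sketch: allocate the replacement first; a failure leaves the
     * running configuration untouched.
     */
    static int foo_reconfig(struct foo_priv *priv,
                            const struct foo_profile *new_prof)
    {
            struct foo_res *fresh = foo_alloc_res(new_prof);

            if (!fresh)
                    return -ENOMEM;

            foo_stop_port(priv);
            foo_free_res(priv->res);        /* old resources freed only now */
            priv->res = fresh;
            return foo_start_port(priv);
    }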
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index b4b258c8c..8359e9e51 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -406,14 +406,18 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
- if (err)
+ if (err) {
en_err(priv, "Failed configuring VLAN filter\n");
+ goto out;
+ }
}
- if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
- en_dbg(HW, priv, "failed adding vlan %d\n", vid);
- mutex_unlock(&mdev->state_lock);
+ err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
+ if (err)
+ en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
- return 0;
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
@@ -421,7 +425,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int err;
+ int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@@ -438,7 +442,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
}
mutex_unlock(&mdev->state_lock);
- return 0;
+ return err;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
@@ -1296,15 +1300,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
}
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
spin_lock_bh(&priv->stats_lock);
- memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+ netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
- return &priv->ret_stats;
+ return stats;
}
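With the private stats copies gone, the driver reports through .ndo_get_stats64 and widens the unsigned-long counters in dev->stats with netdev_stats_to_stats64(). A minimal sketch of such a handler for this kernel's signature (the real one above also takes priv->stats_lock):

    #include <linux/netdevice.h>

    static struct rtnl_link_stats64 *
    foo_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
    {
            /* widen the unsigned-long counters to u64 */
            netdev_stats_to_stats64(stats, &dev->stats);
            return stats;
    }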
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1856,6 +1861,7 @@ static void mlx4_en_restart(struct work_struct *work)
en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+ rtnl_lock();
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
mlx4_en_stop_port(dev, 1);
@@ -1863,6 +1869,7 @@ static void mlx4_en_restart(struct work_struct *work)
en_err(priv, "Failed restarting port %d\n", priv->port);
}
mutex_unlock(&mdev->state_lock);
+ rtnl_unlock();
}
static void mlx4_en_clear_stats(struct net_device *dev)
@@ -1874,7 +1881,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
- memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
@@ -1890,6 +1896,11 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->tx_ring[i]->bytes = 0;
priv->tx_ring[i]->packets = 0;
priv->tx_ring[i]->tx_csum = 0;
+ priv->tx_ring[i]->tx_dropped = 0;
+ priv->tx_ring[i]->queue_stopped = 0;
+ priv->tx_ring[i]->wake_queue = 0;
+ priv->tx_ring[i]->tso_packets = 0;
+ priv->tx_ring[i]->xmit_more = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
@@ -1943,7 +1954,7 @@ static int mlx4_en_close(struct net_device *dev)
return 0;
}
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
@@ -1968,7 +1979,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
}
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
@@ -2025,11 +2036,91 @@ err:
return -ENOMEM;
}
+static void mlx4_en_shutdown(struct net_device *dev)
+{
+ rtnl_lock();
+ netif_device_detach(dev);
+ mlx4_en_close(dev);
+ rtnl_unlock();
+}
+
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src,
+ struct mlx4_en_port_profile *prof)
+{
+ memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+ dst->tx_ring_num = prof->tx_ring_num;
+ dst->rx_ring_num = prof->rx_ring_num;
+ dst->flags = prof->flags;
+ dst->mdev = src->mdev;
+ dst->port = src->port;
+ dst->dev = src->dev;
+ dst->prof = prof;
+ dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+ dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_ring)
+ return -ENOMEM;
+
+ dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_cq) {
+ kfree(dst->tx_ring);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src)
+{
+ memcpy(dst->rx_ring, src->rx_ring,
+ sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+ memcpy(dst->rx_cq, src->rx_cq,
+ sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+ memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->tx_ring_num = src->tx_ring_num;
+ dst->rx_ring_num = src->rx_ring_num;
+ dst->tx_ring = src->tx_ring;
+ dst->tx_cq = src->tx_cq;
+ memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof)
+{
+ mlx4_en_copy_priv(tmp, priv, prof);
+
+ if (mlx4_en_alloc_resources(tmp)) {
+ en_warn(priv,
+ "%s: Resource allocation failed, using previous configuration\n",
+ __func__);
+ kfree(tmp->tx_ring);
+ kfree(tmp->tx_cq);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp)
+{
+ mlx4_en_free_resources(priv);
+ mlx4_en_update_priv(priv, tmp);
+}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ bool shutdown = mdev->dev->persist->interface_state &
+ MLX4_INTERFACE_STATE_SHUTDOWN;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@@ -2037,7 +2128,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
if (priv->registered) {
devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
priv->port));
- unregister_netdev(dev);
+ if (shutdown)
+ mlx4_en_shutdown(dev);
+ else
+ unregister_netdev(dev);
}
if (priv->allocated)
@@ -2057,12 +2151,17 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mdev->upper[priv->port] = NULL;
mutex_unlock(&mdev->state_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mlx4_en_cleanup_filters(priv);
+#endif
+
mlx4_en_free_resources(priv);
kfree(priv->tx_ring);
kfree(priv->tx_cq);
- free_netdev(dev);
+ if (!shutdown)
+ free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
@@ -2355,8 +2454,12 @@ out:
}
/* set offloads */
- priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2365,8 +2468,12 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
vxlan_del_task);
/* unset offloads */
- priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL);
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2425,7 +2532,23 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
netdev_features_t features)
{
features = vlan_features_check(skb, features);
- return vxlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
+ * support inner IPv6 checksums and segmentation so we need to
+ * strip that feature if this is an IPv6 encapsulated frame.
+ */
+ if (skb->encapsulation &&
+ (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (!priv->vxlan_port ||
+ (ip_hdr(skb)->version != 4) ||
+ (udp_hdr(skb)->dest != priv->vxlan_port))
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
+
+ return features;
}
#endif
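.ndo_features_check gives a driver a per-skb veto over offloads; here it strips checksum and GSO bits whenever the encapsulated frame is not the one tunnel layout ConnectX-3 can checksum. A skeleton of such a hook, with foo_hw_can_offload() standing in for the hardware-specific test:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    bool foo_hw_can_offload(struct net_device *dev, struct sk_buff *skb); /* placeholder */

    static netdev_features_t
    foo_features_check(struct sk_buff *skb, struct net_device *dev,
                       netdev_features_t features)
    {
            /* fall back to software checksum/segmentation when the
             * hardware cannot handle this encapsulation
             */
            if (skb->encapsulation && !foo_hw_can_offload(dev, skb))
                    features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
            return features;
    }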
@@ -2461,7 +2584,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
@@ -2493,7 +2616,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
@@ -2907,7 +3030,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
- MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+ MLX4_EN_PAGE_SIZE);
if (err) {
en_err(priv, "Failed to allocate page for rx qps\n");
goto out;
@@ -2990,8 +3113,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
mdev->pndev[port] = dev;
@@ -3071,6 +3199,8 @@ int mlx4_en_reset_config(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
@@ -3087,19 +3217,29 @@ int mlx4_en_reset_config(struct net_device *dev,
return -EINVAL;
}
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
- ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+ ts_config.rx_filter,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
- priv->hwtstamp_config.tx_type = ts_config.tx_type;
- priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+ mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -3133,11 +3273,6 @@ int mlx4_en_reset_config(struct net_device *dev,
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
@@ -3146,6 +3281,8 @@ int mlx4_en_reset_config(struct net_device *dev,
out:
mutex_unlock(&mdev->state_lock);
- netdev_features_change(dev);
+ kfree(tmp);
+ if (!err)
+ netdev_features_change(dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 20b6c2e67..5aa8b751f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
- struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device *dev = mdev->pndev[port];
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
+ stats->tx_dropped = 0;
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->tx_packets += ring->packets;
stats->tx_bytes += ring->bytes;
+ stats->tx_dropped += ring->tx_dropped;
priv->port_stats.tx_chksum_offload += ring->tx_csum;
priv->port_stats.queue_stopped += ring->queue_stopped;
priv->port_stats.wake_queue += ring->wake_queue;
@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
- stats->collisions = 0;
stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
- stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
- stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->rx_missed_errors = 0;
- stats->tx_aborted_errors = 0;
- stats->tx_carrier_errors = 0;
- stats->tx_fifo_errors = 0;
- stats->tx_heartbeat_errors = 0;
- stats->tx_window_errors = 0;
- stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
/* RX stats */
priv->pkstats.rx_multicast_packets = stats->multicast;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 02e925d6f..a6b0db0e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -107,37 +107,6 @@ int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
return ret;
}
-int mlx4_en_map_buffer(struct mlx4_buf *buf)
-{
- struct page **pages;
- int i;
-
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
- return 0;
-
- pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; ++i)
- pages[i] = virt_to_page(buf->page_list[i].buf);
-
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- return 0;
-}
-
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
-{
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
- return;
-
- vunmap(buf->direct.buf);
-}
-
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index ca3a38421..99b5407f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -394,17 +394,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
- err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
- ring->buf_size, 2 * PAGE_SIZE);
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_info;
- err = mlx4_en_map_buffer(&ring->wqres.buf);
- if (err) {
- en_err(priv, "Failed to map RX buffer\n");
- goto err_hwq;
- }
ring->buf = ring->wqres.buf.direct.buf;
ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
@@ -412,8 +406,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
*pring = ring;
return 0;
-err_hwq:
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
@@ -517,15 +509,11 @@ void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring = *pring;
- mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
- mlx4_en_cleanup_filters(priv);
-#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a386f047c..76aa4d271 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -41,6 +41,7 @@
#include <linux/vmalloc.h>
#include <linux/tcp.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/moduleparam.h>
#include "mlx4_en.h"
@@ -93,20 +94,13 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
- err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
- 2 * PAGE_SIZE);
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
}
- err = mlx4_en_map_buffer(&ring->wqres.buf);
- if (err) {
- en_err(priv, "Failed to map TX buffer\n");
- goto err_hwq_res;
- }
-
ring->buf = ring->wqres.buf.direct.buf;
en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
@@ -117,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
MLX4_RESERVE_ETH_BF_QP);
if (err) {
en_err(priv, "failed reserving qp for TX ring\n");
- goto err_map;
+ goto err_hwq_res;
}
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -154,8 +148,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err_reserve:
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
-err_map:
- mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
@@ -182,7 +174,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
mlx4_qp_remove(mdev->dev, &ring->qp);
mlx4_qp_free(mdev->dev, &ring->qp);
mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
- mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
@@ -735,12 +726,12 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool inline_ok;
u32 ring_cons;
- if (!priv->port_up)
- goto tx_drop;
-
tx_ind = skb_get_queue_mapping(skb);
ring = priv->tx_ring[tx_ind];
+ if (!priv->port_up)
+ goto tx_drop;
+
/* fetch ring->cons far ahead before needing it to avoid stall */
ring_cons = ACCESS_ONCE(ring->cons);
@@ -920,8 +911,18 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_ind, fragptr);
if (skb->encapsulation) {
- struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
- if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ u8 proto;
+
+ ip.hdr = skb_inner_network_header(skb);
+ proto = (ip.v4->version == 4) ? ip.v4->protocol :
+ ip.v6->nexthdr;
+
+ if (proto == IPPROTO_TCP || proto == IPPROTO_UDP)
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
else
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
@@ -1029,7 +1030,7 @@ tx_drop_unmap:
tx_drop:
dev_kfree_skb_any(skb);
- priv->stats.tx_dropped++;
+ ring->tx_dropped++;
return NETDEV_TX_OK;
}
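The xmit fix stops assuming an inner IPv4 header: a union over the inner network header reads the version nibble shared by both layouts, then picks protocol (IPv4) or nexthdr (IPv6). A self-contained sketch of the dispatch; foo_inner_l4_proto() is illustrative:

    #include <linux/ip.h>
    #include <linux/ipv6.h>
    #include <linux/skbuff.h>

    static u8 foo_inner_l4_proto(struct sk_buff *skb)
    {
            union {
                    struct iphdr *v4;
                    struct ipv6hdr *v6;
                    unsigned char *hdr;
            } ip;

            ip.hdr = skb_inner_network_header(skb);
            /* the version nibble sits in the first byte of both headers */
            return ip.v4->version == 4 ? ip.v4->protocol : ip.v6->nexthdr;
    }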
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12c77a70a..546fab0ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3222,6 +3222,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
+ spin_lock_init(&priv->cmd.context_lock);
INIT_LIST_HEAD(&priv->bf_list);
mutex_init(&priv->bf_mutex);
@@ -4134,8 +4135,11 @@ static void mlx4_shutdown(struct pci_dev *pdev)
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
- if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
+ /* Notify mlx4 clients that the kernel is being shut down */
+ persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
mlx4_unload_one(pdev);
+ }
mutex_unlock(&persist->interface_state_mutex);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 6aa73972d..f2d092001 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1102,7 +1102,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 members_count;
- int index, prev;
+ int index = -1, prev;
int link = 0;
int i;
int err;
@@ -1181,7 +1181,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
goto out;
out:
- if (prot == MLX4_PROT_ETH) {
+ if (prot == MLX4_PROT_ETH && index != -1) {
/* manage the steering entry for promisc mode */
if (new_entry)
err = new_steering_entry(dev, port, steer,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 63b1aeae2..13d297ee3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -270,6 +270,7 @@ struct mlx4_en_tx_ring {
unsigned long tx_csum;
unsigned long tso_packets;
unsigned long xmit_more;
+ unsigned int tx_dropped;
struct mlx4_bf bf;
unsigned long queue_stopped;
@@ -352,12 +353,14 @@ struct mlx4_en_port_profile {
u32 rx_ring_num;
u32 tx_ring_size;
u32 rx_ring_size;
+ u8 num_tx_rings_p_up;
u8 rx_pause;
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
int inline_thold;
+ struct hwtstamp_config hwtstamp_config;
};
struct mlx4_en_profile {
@@ -482,8 +485,6 @@ struct mlx4_en_priv {
struct mlx4_en_port_profile *prof;
struct net_device *dev;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct net_device_stats stats;
- struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
@@ -624,8 +625,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause);
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode, int node);
@@ -672,8 +676,6 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
-int mlx4_en_map_buffer(struct mlx4_buf *buf);
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
int loopback);
void mlx4_en_calc_rx_buf(struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index f5c3b9465..1cf722eba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -31,10 +31,3 @@ config MLX5_CORE_EN_DCB
This flag is depended on the kernel's DCB support.
If unsure, set to Y
-
-config MLX5_CORE_EN_VXLAN
- bool "VXLAN offloads Support"
- default y
- depends on MLX5_CORE_EN && VXLAN && !(MLX5_CORE=y && VXLAN=m)
- ---help---
- Say Y here if you want to use VXLAN offloads in the driver.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index bf65b71c7..9ea7b5830 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -2,11 +2,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
+ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o fs_counters.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
- en_txrx.o en_clock.o en_tc.o
+ en_txrx.o en_clock.o vxlan.o en_tc.o en_arfs.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN_VXLAN) += vxlan.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index eb926e1ee..d6e2a1cae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -294,6 +294,13 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -320,8 +327,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
- case MLX5_CMD_OP_2ERR_QP:
- case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_QUERY_QP:
case MLX5_CMD_OP_SQD_RTS_QP:
case MLX5_CMD_OP_INIT2INIT_QP:
@@ -341,7 +346,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
- case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OP_SET_ROCE_ADDRESS:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -389,12 +393,15 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_RQT:
case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
+
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
- case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -406,178 +413,143 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
const char *mlx5_command_str(int command)
{
- switch (command) {
- case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
- return "QUERY_HCA_VPORT_CONTEXT";
-
- case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
- return "MODIFY_HCA_VPORT_CONTEXT";
-
- case MLX5_CMD_OP_QUERY_HCA_CAP:
- return "QUERY_HCA_CAP";
-
- case MLX5_CMD_OP_SET_HCA_CAP:
- return "SET_HCA_CAP";
-
- case MLX5_CMD_OP_QUERY_ADAPTER:
- return "QUERY_ADAPTER";
-
- case MLX5_CMD_OP_INIT_HCA:
- return "INIT_HCA";
-
- case MLX5_CMD_OP_TEARDOWN_HCA:
- return "TEARDOWN_HCA";
-
- case MLX5_CMD_OP_ENABLE_HCA:
- return "MLX5_CMD_OP_ENABLE_HCA";
-
- case MLX5_CMD_OP_DISABLE_HCA:
- return "MLX5_CMD_OP_DISABLE_HCA";
-
- case MLX5_CMD_OP_QUERY_PAGES:
- return "QUERY_PAGES";
-
- case MLX5_CMD_OP_MANAGE_PAGES:
- return "MANAGE_PAGES";
-
- case MLX5_CMD_OP_CREATE_MKEY:
- return "CREATE_MKEY";
-
- case MLX5_CMD_OP_QUERY_MKEY:
- return "QUERY_MKEY";
-
- case MLX5_CMD_OP_DESTROY_MKEY:
- return "DESTROY_MKEY";
-
- case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
- return "QUERY_SPECIAL_CONTEXTS";
-
- case MLX5_CMD_OP_CREATE_EQ:
- return "CREATE_EQ";
-
- case MLX5_CMD_OP_DESTROY_EQ:
- return "DESTROY_EQ";
-
- case MLX5_CMD_OP_QUERY_EQ:
- return "QUERY_EQ";
-
- case MLX5_CMD_OP_CREATE_CQ:
- return "CREATE_CQ";
-
- case MLX5_CMD_OP_DESTROY_CQ:
- return "DESTROY_CQ";
-
- case MLX5_CMD_OP_QUERY_CQ:
- return "QUERY_CQ";
-
- case MLX5_CMD_OP_MODIFY_CQ:
- return "MODIFY_CQ";
-
- case MLX5_CMD_OP_CREATE_QP:
- return "CREATE_QP";
-
- case MLX5_CMD_OP_DESTROY_QP:
- return "DESTROY_QP";
-
- case MLX5_CMD_OP_RST2INIT_QP:
- return "RST2INIT_QP";
-
- case MLX5_CMD_OP_INIT2RTR_QP:
- return "INIT2RTR_QP";
-
- case MLX5_CMD_OP_RTR2RTS_QP:
- return "RTR2RTS_QP";
-
- case MLX5_CMD_OP_RTS2RTS_QP:
- return "RTS2RTS_QP";
-
- case MLX5_CMD_OP_SQERR2RTS_QP:
- return "SQERR2RTS_QP";
-
- case MLX5_CMD_OP_2ERR_QP:
- return "2ERR_QP";
-
- case MLX5_CMD_OP_2RST_QP:
- return "2RST_QP";
-
- case MLX5_CMD_OP_QUERY_QP:
- return "QUERY_QP";
-
- case MLX5_CMD_OP_MAD_IFC:
- return "MAD_IFC";
-
- case MLX5_CMD_OP_INIT2INIT_QP:
- return "INIT2INIT_QP";
-
- case MLX5_CMD_OP_CREATE_PSV:
- return "CREATE_PSV";
-
- case MLX5_CMD_OP_DESTROY_PSV:
- return "DESTROY_PSV";
-
- case MLX5_CMD_OP_CREATE_SRQ:
- return "CREATE_SRQ";
-
- case MLX5_CMD_OP_DESTROY_SRQ:
- return "DESTROY_SRQ";
-
- case MLX5_CMD_OP_QUERY_SRQ:
- return "QUERY_SRQ";
-
- case MLX5_CMD_OP_ARM_RQ:
- return "ARM_RQ";
-
- case MLX5_CMD_OP_CREATE_XRC_SRQ:
- return "CREATE_XRC_SRQ";
-
- case MLX5_CMD_OP_DESTROY_XRC_SRQ:
- return "DESTROY_XRC_SRQ";
-
- case MLX5_CMD_OP_QUERY_XRC_SRQ:
- return "QUERY_XRC_SRQ";
-
- case MLX5_CMD_OP_ARM_XRC_SRQ:
- return "ARM_XRC_SRQ";
-
- case MLX5_CMD_OP_ALLOC_PD:
- return "ALLOC_PD";
-
- case MLX5_CMD_OP_DEALLOC_PD:
- return "DEALLOC_PD";
-
- case MLX5_CMD_OP_ALLOC_UAR:
- return "ALLOC_UAR";
-
- case MLX5_CMD_OP_DEALLOC_UAR:
- return "DEALLOC_UAR";
-
- case MLX5_CMD_OP_ATTACH_TO_MCG:
- return "ATTACH_TO_MCG";
-
- case MLX5_CMD_OP_DETTACH_FROM_MCG:
- return "DETTACH_FROM_MCG";
-
- case MLX5_CMD_OP_ALLOC_XRCD:
- return "ALLOC_XRCD";
-
- case MLX5_CMD_OP_DEALLOC_XRCD:
- return "DEALLOC_XRCD";
-
- case MLX5_CMD_OP_ACCESS_REG:
- return "MLX5_CMD_OP_ACCESS_REG";
-
- case MLX5_CMD_OP_SET_WOL_ROL:
- return "SET_WOL_ROL";
-
- case MLX5_CMD_OP_QUERY_WOL_ROL:
- return "QUERY_WOL_ROL";
-
- case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
- return "ADD_VXLAN_UDP_DPORT";
-
- case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
- return "DELETE_VXLAN_UDP_DPORT";
+#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd
+ switch (command) {
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
+ MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
+ MLX5_COMMAND_STR_CASE(INIT_HCA);
+ MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
+ MLX5_COMMAND_STR_CASE(ENABLE_HCA);
+ MLX5_COMMAND_STR_CASE(DISABLE_HCA);
+ MLX5_COMMAND_STR_CASE(QUERY_PAGES);
+ MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
+ MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
+ MLX5_COMMAND_STR_CASE(QUERY_ISSI);
+ MLX5_COMMAND_STR_CASE(SET_ISSI);
+ MLX5_COMMAND_STR_CASE(CREATE_MKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_MKEY);
+ MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
+ MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
+ MLX5_COMMAND_STR_CASE(CREATE_EQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_EQ);
+ MLX5_COMMAND_STR_CASE(QUERY_EQ);
+ MLX5_COMMAND_STR_CASE(GEN_EQE);
+ MLX5_COMMAND_STR_CASE(CREATE_CQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_CQ);
+ MLX5_COMMAND_STR_CASE(QUERY_CQ);
+ MLX5_COMMAND_STR_CASE(MODIFY_CQ);
+ MLX5_COMMAND_STR_CASE(CREATE_QP);
+ MLX5_COMMAND_STR_CASE(DESTROY_QP);
+ MLX5_COMMAND_STR_CASE(RST2INIT_QP);
+ MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
+ MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
+ MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
+ MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
+ MLX5_COMMAND_STR_CASE(2ERR_QP);
+ MLX5_COMMAND_STR_CASE(2RST_QP);
+ MLX5_COMMAND_STR_CASE(QUERY_QP);
+ MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
+ MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
+ MLX5_COMMAND_STR_CASE(CREATE_PSV);
+ MLX5_COMMAND_STR_CASE(DESTROY_PSV);
+ MLX5_COMMAND_STR_CASE(CREATE_SRQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_SRQ);
+ MLX5_COMMAND_STR_CASE(ARM_RQ);
+ MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(CREATE_DCT);
+ MLX5_COMMAND_STR_CASE(DESTROY_DCT);
+ MLX5_COMMAND_STR_CASE(DRAIN_DCT);
+ MLX5_COMMAND_STR_CASE(QUERY_DCT);
+ MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
+ MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
+ MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
+ MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
+ MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
+ MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(ALLOC_PD);
+ MLX5_COMMAND_STR_CASE(DEALLOC_PD);
+ MLX5_COMMAND_STR_CASE(ALLOC_UAR);
+ MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
+ MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
+ MLX5_COMMAND_STR_CASE(ACCESS_REG);
+ MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
+ MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
+ MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
+ MLX5_COMMAND_STR_CASE(MAD_IFC);
+ MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
+ MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
+ MLX5_COMMAND_STR_CASE(NOP);
+ MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
+ MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
+ MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
+ MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
+ MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
+ MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
+ MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
+ MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
+ MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
+ MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
+ MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
+ MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
+ MLX5_COMMAND_STR_CASE(CREATE_TIR);
+ MLX5_COMMAND_STR_CASE(MODIFY_TIR);
+ MLX5_COMMAND_STR_CASE(DESTROY_TIR);
+ MLX5_COMMAND_STR_CASE(QUERY_TIR);
+ MLX5_COMMAND_STR_CASE(CREATE_SQ);
+ MLX5_COMMAND_STR_CASE(MODIFY_SQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_SQ);
+ MLX5_COMMAND_STR_CASE(QUERY_SQ);
+ MLX5_COMMAND_STR_CASE(CREATE_RQ);
+ MLX5_COMMAND_STR_CASE(MODIFY_RQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_RQ);
+ MLX5_COMMAND_STR_CASE(QUERY_RQ);
+ MLX5_COMMAND_STR_CASE(CREATE_RMP);
+ MLX5_COMMAND_STR_CASE(MODIFY_RMP);
+ MLX5_COMMAND_STR_CASE(DESTROY_RMP);
+ MLX5_COMMAND_STR_CASE(QUERY_RMP);
+ MLX5_COMMAND_STR_CASE(CREATE_TIS);
+ MLX5_COMMAND_STR_CASE(MODIFY_TIS);
+ MLX5_COMMAND_STR_CASE(DESTROY_TIS);
+ MLX5_COMMAND_STR_CASE(QUERY_TIS);
+ MLX5_COMMAND_STR_CASE(CREATE_RQT);
+ MLX5_COMMAND_STR_CASE(MODIFY_RQT);
+ MLX5_COMMAND_STR_CASE(DESTROY_RQT);
+ MLX5_COMMAND_STR_CASE(QUERY_RQT);
+ MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
+ MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
+ MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
+ MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
default: return "unknown command opcode";
}
}
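
The MLX5_COMMAND_STR_CASE macro above collapses dozens of near-identical case/return pairs: '##' pastes the short opcode name onto the MLX5_CMD_OP_ prefix to form the enum label, while '#' stringizes the same name for the return value, so each opcode is spelled exactly once. A minimal userspace sketch of the same trick (the enum and names here are illustrative, not the driver's):

#include <stdio.h>

/* Same stringizing trick as MLX5_COMMAND_STR_CASE: '##' builds the
 * enum label, '#' builds the returned string from one spelling. */
enum cmd_op { CMD_OP_CREATE_CQ, CMD_OP_DESTROY_CQ };

#define CMD_STR_CASE(__cmd) case CMD_OP_ ## __cmd: return #__cmd

static const char *cmd_str(int command)
{
	switch (command) {
	CMD_STR_CASE(CREATE_CQ);
	CMD_STR_CASE(DESTROY_CQ);
	default: return "unknown command opcode";
	}
}

int main(void)
{
	printf("%s\n", cmd_str(CMD_OP_DESTROY_CQ)); /* prints DESTROY_CQ */
	return 0;
}

Besides shrinking the function, this removes the drift visible in the old table, where a few entries (ENABLE_HCA, DISABLE_HCA, ACCESS_REG) returned the full "MLX5_CMD_OP_*" spelling instead of the short name.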
@@ -634,11 +606,36 @@ static void dump_command(struct mlx5_core_dev *dev,
pr_debug("\n");
}
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+ struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+ return be16_to_cpu(hdr->opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_cmd_work_ent *ent = container_of(dwork,
+ struct mlx5_cmd_work_ent,
+ cb_timeout_work);
+ struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+ cmd);
+
+ ent->ret = -ETIMEDOUT;
+ mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+ mlx5_command_str(msg_to_opcode(ent->in)),
+ msg_to_opcode(ent->in));
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
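
cb_timeout_handler() walks back from the raw work_struct to the delayed work, then to the command entry, and finally to the device, using container_of() at each step; it then force-completes the entry via mlx5_cmd_comp_handler() so an asynchronous command whose completion EQE never arrives cannot hang forever. A runnable userspace sketch of the container_of() recovery (struct names are illustrative):

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's container_of(): subtract the
 * member offset to recover the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };
struct cmd_ent { int idx; struct work timeout_work; };

static void handler(struct work *w)
{
	struct cmd_ent *ent = container_of(w, struct cmd_ent, timeout_work);

	printf("entry index %d timed out\n", ent->idx);
}

int main(void)
{
	struct cmd_ent ent = { .idx = 7 };

	handler(&ent.timeout_work); /* recovers &ent from the member */
	return 0;
}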
+
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
@@ -679,6 +676,9 @@ static void cmd_work_handler(struct work_struct *work)
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
+ if (ent->callback)
+ schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
@@ -723,13 +723,6 @@ static const char *deliv_status_to_str(u8 status)
}
}
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
- struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
- return be16_to_cpu(hdr->opcode);
-}
-
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -738,13 +731,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
 	if (cmd->mode == CMD_MODE_POLLING) {
 		wait_for_completion(&ent->done);
-		err = ent->ret;
-	} else {
-		if (!wait_for_completion_timeout(&ent->done, timeout))
-			err = -ETIMEDOUT;
-		else
-			err = 0;
+	} else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+		ent->ret = -ETIMEDOUT;
+		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
 	}
+
+	err = ent->ret;
+
 	if (err == -ETIMEDOUT) {
 		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
 			       mlx5_command_str(msg_to_opcode(ent->in)),
@@ -793,6 +786,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (!callback)
init_completion(&ent->done);
+ INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
INIT_WORK(&ent->work, cmd_work_handler);
if (page_queue) {
cmd_work_handler(&ent->work);
@@ -802,28 +796,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
}
- if (!callback) {
- err = wait_func(dev, ent);
- if (err == -ETIMEDOUT)
- goto out;
-
- ds = ent->ts2 - ent->ts1;
- op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
- if (op < ARRAY_SIZE(cmd->stats)) {
- stats = &cmd->stats[op];
- spin_lock_irq(&stats->lock);
- stats->sum += ds;
- ++stats->n;
- spin_unlock_irq(&stats->lock);
- }
- mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
- "fw exec time for %s is %lld nsec\n",
- mlx5_command_str(op), ds);
- *status = ent->status;
- free_cmd(ent);
- }
+ if (callback)
+ goto out;
- return err;
+ err = wait_func(dev, ent);
+ if (err == -ETIMEDOUT)
+ goto out_free;
+
+ ds = ent->ts2 - ent->ts1;
+ op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ if (op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[op];
+ spin_lock_irq(&stats->lock);
+ stats->sum += ds;
+ ++stats->n;
+ spin_unlock_irq(&stats->lock);
+ }
+ mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+ "fw exec time for %s is %lld nsec\n",
+ mlx5_command_str(op), ds);
+ *status = ent->status;
out_free:
free_cmd(ent);
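
The restructured exit path of mlx5_cmd_invoke() follows the kernel's usual goto-unwind idiom: asynchronous callers bail out early and leave the entry alive for the completion handler, while synchronous callers fall through to a single out_free label, which now also runs on -ETIMEDOUT because wait_func() has already force-completed the entry. A hedged userspace sketch of that control flow (wait_for() and the names here are stand-ins, not driver API):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Stand-in for wait_func(): reports success and a zero status. */
static int wait_for(int *ret) { *ret = 0; return 0; }

static int invoke(int async)
{
	int *ent = malloc(sizeof(*ent));
	int err = 0;

	if (!ent)
		return -ENOMEM;
	if (async)
		goto out;	/* completion path owns the entry */

	err = wait_for(ent);
	if (err == -ETIMEDOUT)
		goto out_free;	/* entry was already force-completed */

	printf("command done, status %d\n", *ent);
out_free:
	free(ent);
out:
	return err;
}

int main(void)
{
	return invoke(0);
}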
@@ -1213,41 +1205,30 @@ err_dbg:
return err;
}
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
struct mlx5_cmd *cmd = &dev->cmd;
int i;
for (i = 0; i < cmd->max_reg_cmds; i++)
down(&cmd->sem);
-
down(&cmd->pages_sem);
- flush_workqueue(cmd->wq);
-
- cmd->mode = CMD_MODE_EVENTS;
+ cmd->mode = mode;
up(&cmd->pages_sem);
for (i = 0; i < cmd->max_reg_cmds; i++)
up(&cmd->sem);
}
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd *cmd = &dev->cmd;
- int i;
-
- for (i = 0; i < cmd->max_reg_cmds; i++)
- down(&cmd->sem);
-
- down(&cmd->pages_sem);
-
- flush_workqueue(cmd->wq);
- cmd->mode = CMD_MODE_POLLING;
+ mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
- up(&cmd->pages_sem);
- for (i = 0; i < cmd->max_reg_cmds; i++)
- up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+ mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
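
Both mode switches are now one helper, mlx5_cmd_change_mod(), which quiesces the command interface by taking every command-slot semaphore plus the page semaphore before flipping cmd->mode; the old flush_workqueue() calls disappear, presumably because holding every slot semaphore already guarantees no command work is outstanding. A small POSIX sketch of the quiesce-then-switch pattern (sizes and names are illustrative; build with -pthread):

#include <semaphore.h>
#include <stdio.h>

#define MAX_CMDS 4

static sem_t slots[MAX_CMDS];
static int mode;

static void change_mode(int new_mode)
{
	int i;

	for (i = 0; i < MAX_CMDS; i++)
		sem_wait(&slots[i]);	/* drain all command slots */
	mode = new_mode;		/* safe: nothing is in flight */
	for (i = 0; i < MAX_CMDS; i++)
		sem_post(&slots[i]);
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_CMDS; i++)
		sem_init(&slots[i], 0, 1);
	change_mode(1);
	printf("mode=%d\n", mode);
	return 0;
}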
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
@@ -1283,6 +1264,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
struct semaphore *sem;
ent = cmd->ent_arr[i];
+ if (ent->callback)
+ cancel_delayed_work(&ent->cb_timeout_work);
if (ent->page_queue)
sem = &cmd->pages_sem;
else
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index b51e42d6f..873a631ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -39,6 +39,53 @@
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
+#define TASKLET_MAX_TIME 2
+#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
+
+void mlx5_cq_tasklet_cb(unsigned long data)
+{
+ unsigned long flags;
+ unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
+ struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
+ struct mlx5_core_cq *mcq;
+ struct mlx5_core_cq *temp;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ list_splice_tail_init(&ctx->list, &ctx->process_list);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ list_for_each_entry_safe(mcq, temp, &ctx->process_list,
+ tasklet_ctx.list) {
+ list_del_init(&mcq->tasklet_ctx.list);
+ mcq->tasklet_ctx.comp(mcq);
+ if (atomic_dec_and_test(&mcq->refcount))
+ complete(&mcq->free);
+ if (time_after(jiffies, end))
+ break;
+ }
+
+ if (!list_empty(&ctx->process_list))
+ tasklet_schedule(&ctx->task);
+}
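
mlx5_cq_tasklet_cb() splices the pending CQ list under the lock, then drains it against a 2 ms budget and reschedules itself if work remains, so one busy EQ cannot monopolize softirq time. A userspace sketch of that deadline-bounded drain (the clock()-based budget stands in for jiffies):

#include <stdio.h>
#include <time.h>

#define BUDGET_MS 2

/* Process queued items until the deadline passes; a non-zero return
 * means "reschedule" for the leftovers, as the tasklet does. */
static int process_list(int *items, int n)
{
	clock_t end = clock() + (BUDGET_MS * CLOCKS_PER_SEC) / 1000;
	int done = 0;

	while (done < n) {
		printf("completing cq %d\n", items[done++]);
		if (clock() > end)	/* budget exhausted */
			break;
	}
	return n - done;
}

int main(void)
{
	int cqs[] = { 3, 5, 9 };
	int left = process_list(cqs, 3);

	if (left)
		printf("%d cqs left, reschedule\n", left);
	return 0;
}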
+
+static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
+{
+ unsigned long flags;
+ struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+
+ spin_lock_irqsave(&tasklet_ctx->lock, flags);
+	/* When migrating CQs between EQs is implemented, note that this
+	 * point will need synchronization: while a CQ is being migrated,
+	 * completions could still arrive on the old EQ.
+	 */
+ if (list_empty_careful(&cq->tasklet_ctx.list)) {
+ atomic_inc(&cq->refcount);
+ list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
+ }
+ spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
+}
+
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
struct mlx5_core_cq *cq;
@@ -96,6 +143,13 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_out out;
struct mlx5_destroy_cq_mbox_in din;
struct mlx5_destroy_cq_mbox_out dout;
+ int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
+ c_eqn);
+ struct mlx5_eq *eq;
+
+ eq = mlx5_eqn2eq(dev, eqn);
+ if (IS_ERR(eq))
+ return PTR_ERR(eq);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
memset(&out, 0, sizeof(out));
@@ -111,6 +165,11 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->arm_sn = 0;
atomic_set(&cq->refcount, 1);
init_completion(&cq->free);
+ if (!cq->comp)
+ cq->comp = mlx5_add_cq_to_tasklet;
+ /* assuming CQ will be deleted before the EQ */
+ cq->tasklet_ctx.priv = &eq->tasklet_ctx;
+ INIT_LIST_HEAD(&cq->tasklet_ctx.list);
spin_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, cq->cqn, cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 24344aafb..943b1bd43 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -46,6 +46,9 @@
#include <linux/rhashtable.h>
#include "wq.h"
#include "mlx5_core.h"
+#include "en_stats.h"
+
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
#define MLX5E_MAX_NUM_TC 8
@@ -57,12 +60,30 @@
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
+
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_WQE_SZ 17
+#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
+ MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
+#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
+ MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
+ BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+#define MLX5_UMR_ALIGN (2048)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
+
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
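
The MPWRQ constants above encode straightforward power-of-two arithmetic: a multi-packet WQE spans 2^MLX5_MPWRQ_LOG_WQE_SZ = 128 KiB, so on a 4 KiB-page system the page order is 17 - 12 = 5 and each WQE consumes 32 pages. A runnable sketch of the same computation (PAGE_SHIFT fixed at 12 here for illustration; the driver takes it from the running kernel):

#include <stdio.h>

#define PAGE_SHIFT	12
#define LOG_WQE_SZ	17
#define WQE_PAGE_ORDER	(LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
			 LOG_WQE_SZ - PAGE_SHIFT : 0)
#define PAGES_PER_WQE	(1 << WQE_PAGE_ORDER)

int main(void)
{
	/* 2^17 = 128 KiB WQE split into 2^5 = 32 pages of 4 KiB */
	printf("page order %d, %d pages per WQE\n",
	       WQE_PAGE_ORDER, PAGES_PER_WQE);
	return 0;
}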
@@ -73,233 +94,69 @@
#define MLX5E_NUM_MAIN_GROUPS 9
+static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
+ wq_size / 2);
+ default:
+ return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
+ wq_size / 2);
+ }
+}
+
+static inline int mlx5_min_log_rq_size(int wq_type)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+ default:
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+ }
+}
+
+static inline int mlx5_max_log_rq_size(int wq_type)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
+ default:
+ return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ }
+}
+
+struct mlx5e_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+struct mlx5e_rx_wqe {
+ struct mlx5_wqe_srq_next_seg next;
+ struct mlx5_wqe_data_seg data;
+};
+
+struct mlx5e_umr_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_umr_ctrl_seg uctrl;
+ struct mlx5_mkey_seg mkc;
+ struct mlx5_wqe_data_seg data;
+};
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif
-static const char vport_strings[][ETH_GSTRING_LEN] = {
- /* vport statistics */
- "rx_packets",
- "rx_bytes",
- "tx_packets",
- "tx_bytes",
- "rx_error_packets",
- "rx_error_bytes",
- "tx_error_packets",
- "tx_error_bytes",
- "rx_unicast_packets",
- "rx_unicast_bytes",
- "tx_unicast_packets",
- "tx_unicast_bytes",
- "rx_multicast_packets",
- "rx_multicast_bytes",
- "tx_multicast_packets",
- "tx_multicast_bytes",
- "rx_broadcast_packets",
- "rx_broadcast_bytes",
- "tx_broadcast_packets",
- "tx_broadcast_bytes",
-
- /* SW counters */
- "tso_packets",
- "tso_bytes",
- "tso_inner_packets",
- "tso_inner_bytes",
- "lro_packets",
- "lro_bytes",
- "rx_csum_good",
- "rx_csum_none",
- "rx_csum_sw",
- "tx_csum_offload",
- "tx_csum_inner",
- "tx_queue_stopped",
- "tx_queue_wake",
- "tx_queue_dropped",
- "rx_wqe_err",
-};
-
-struct mlx5e_vport_stats {
- /* HW counters */
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- u64 rx_error_packets;
- u64 rx_error_bytes;
- u64 tx_error_packets;
- u64 tx_error_bytes;
- u64 rx_unicast_packets;
- u64 rx_unicast_bytes;
- u64 tx_unicast_packets;
- u64 tx_unicast_bytes;
- u64 rx_multicast_packets;
- u64 rx_multicast_bytes;
- u64 tx_multicast_packets;
- u64 tx_multicast_bytes;
- u64 rx_broadcast_packets;
- u64 rx_broadcast_bytes;
- u64 tx_broadcast_packets;
- u64 tx_broadcast_bytes;
-
- /* SW counters */
- u64 tso_packets;
- u64 tso_bytes;
- u64 tso_inner_packets;
- u64 tso_inner_bytes;
- u64 lro_packets;
- u64 lro_bytes;
- u64 rx_csum_good;
- u64 rx_csum_none;
- u64 rx_csum_sw;
- u64 tx_csum_offload;
- u64 tx_csum_inner;
- u64 tx_queue_stopped;
- u64 tx_queue_wake;
- u64 tx_queue_dropped;
- u64 rx_wqe_err;
-
-#define NUM_VPORT_COUNTERS 35
-};
-
-static const char pport_strings[][ETH_GSTRING_LEN] = {
- /* IEEE802.3 counters */
- "frames_tx",
- "frames_rx",
- "check_seq_err",
- "alignment_err",
- "octets_tx",
- "octets_received",
- "multicast_xmitted",
- "broadcast_xmitted",
- "multicast_rx",
- "broadcast_rx",
- "in_range_len_errors",
- "out_of_range_len",
- "too_long_errors",
- "symbol_err",
- "mac_control_tx",
- "mac_control_rx",
- "unsupported_op_rx",
- "pause_ctrl_rx",
- "pause_ctrl_tx",
-
- /* RFC2863 counters */
- "in_octets",
- "in_ucast_pkts",
- "in_discards",
- "in_errors",
- "in_unknown_protos",
- "out_octets",
- "out_ucast_pkts",
- "out_discards",
- "out_errors",
- "in_multicast_pkts",
- "in_broadcast_pkts",
- "out_multicast_pkts",
- "out_broadcast_pkts",
-
- /* RFC2819 counters */
- "drop_events",
- "octets",
- "pkts",
- "broadcast_pkts",
- "multicast_pkts",
- "crc_align_errors",
- "undersize_pkts",
- "oversize_pkts",
- "fragments",
- "jabbers",
- "collisions",
- "p64octets",
- "p65to127octets",
- "p128to255octets",
- "p256to511octets",
- "p512to1023octets",
- "p1024to1518octets",
- "p1519to2047octets",
- "p2048to4095octets",
- "p4096to8191octets",
- "p8192to10239octets",
-};
-
-#define NUM_IEEE_802_3_COUNTERS 19
-#define NUM_RFC_2863_COUNTERS 13
-#define NUM_RFC_2819_COUNTERS 21
-#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
- NUM_RFC_2863_COUNTERS + \
- NUM_RFC_2819_COUNTERS)
-
-struct mlx5e_pport_stats {
- __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
- __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
- __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
-};
-
-static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
- "packets",
- "bytes",
- "csum_none",
- "csum_sw",
- "lro_packets",
- "lro_bytes",
- "wqe_err"
-};
-
-struct mlx5e_rq_stats {
- u64 packets;
- u64 bytes;
- u64 csum_none;
- u64 csum_sw;
- u64 lro_packets;
- u64 lro_bytes;
- u64 wqe_err;
-#define NUM_RQ_STATS 7
-};
-
-static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
- "packets",
- "bytes",
- "tso_packets",
- "tso_bytes",
- "tso_inner_packets",
- "tso_inner_bytes",
- "csum_offload_inner",
- "nop",
- "csum_offload_none",
- "stopped",
- "wake",
- "dropped",
-};
-
-struct mlx5e_sq_stats {
- /* commonly accessed in data path */
- u64 packets;
- u64 bytes;
- u64 tso_packets;
- u64 tso_bytes;
- u64 tso_inner_packets;
- u64 tso_inner_bytes;
- u64 csum_offload_inner;
- u64 nop;
- /* less likely accessed in data path */
- u64 csum_offload_none;
- u64 stopped;
- u64 wake;
- u64 dropped;
-#define NUM_SQ_STATS 12
-};
-
-struct mlx5e_stats {
- struct mlx5e_vport_stats vport;
- struct mlx5e_pport_stats pport;
-};
-
struct mlx5e_params {
u8 log_sq_size;
+ u8 rq_wq_type;
+ u8 mpwqe_log_stride_sz;
+ u8 mpwqe_log_num_strides;
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
+ bool rx_cqe_compress_admin;
+ bool rx_cqe_compress;
u16 rx_cq_moderation_usec;
u16 rx_cq_moderation_pkts;
u16 tx_cq_moderation_usec;
@@ -311,6 +168,7 @@ struct mlx5e_params {
u8 rss_hfunc;
u8 toeplitz_hash_key[40];
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+ bool vlan_strip_disable;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct ieee_ets ets;
#endif
@@ -331,6 +189,8 @@ struct mlx5e_tstamp {
enum {
MLX5E_RQ_STATE_POST_WQES_ENABLE,
+ MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+ MLX5E_RQ_STATE_FLUSH_TIMEOUT,
};
struct mlx5e_cq {
@@ -343,32 +203,91 @@ struct mlx5e_cq {
struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
+ /* cqe decompression */
+ struct mlx5_cqe64 title;
+ struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
+ u8 mini_arr_idx;
+ u16 decmprs_left;
+ u16 decmprs_wqe_counter;
+
/* control */
struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
+ u16 ix);
+
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
+struct mlx5e_dma_info {
+ struct page *page;
+ dma_addr_t addr;
+};
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
u32 wqe_sz;
struct sk_buff **skb;
+ struct mlx5e_mpw_info *wqe_info;
+ __be32 mkey_be;
+ __be32 umr_mkey_be;
struct device *pdev;
struct net_device *netdev;
struct mlx5e_tstamp *tstamp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+ mlx5e_fp_alloc_wqe alloc_wqe;
+ mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
/* control */
struct mlx5_wq_ctrl wq_ctrl;
+ u8 wq_type;
+ u32 mpwqe_stride_sz;
+ u32 mpwqe_num_strides;
u32 rqn;
struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
} ____cacheline_aligned_in_smp;
+struct mlx5e_umr_dma_info {
+ __be64 *mtt;
+ __be64 *mtt_no_align;
+ dma_addr_t mtt_addr;
+ struct mlx5e_dma_info *dma_info;
+};
+
+struct mlx5e_mpw_info {
+ union {
+ struct mlx5e_dma_info dma_info;
+ struct mlx5e_umr_dma_info umr;
+ };
+ u16 consumed_strides;
+ u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+
+ void (*dma_pre_sync)(struct device *pdev,
+ struct mlx5e_mpw_info *wi,
+ u32 wqe_offset, u32 len);
+ void (*add_skb_frag)(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset, u32 len);
+ void (*copy_skb_header)(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen);
+ void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
+};
+
struct mlx5e_tx_wqe_info {
u32 num_bytes;
u8 num_wqebbs;
@@ -389,6 +308,12 @@ struct mlx5e_sq_dma {
enum {
MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
MLX5E_SQ_STATE_BF_ENABLE,
+ MLX5E_SQ_STATE_TX_TIMEOUT,
+};
+
+struct mlx5e_ico_wqe_info {
+ u8 opcode;
+ u8 num_wqebbs;
};
struct mlx5e_sq {
@@ -432,6 +357,7 @@ struct mlx5e_sq {
struct mlx5_uar uar;
struct mlx5e_channel *channel;
int tc;
+ struct mlx5e_ico_wqe_info *ico_wqe_info;
} ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -448,6 +374,7 @@ struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_sq icosq; /* internal control operations */
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
@@ -474,42 +401,42 @@ enum mlx5e_traffic_types {
MLX5E_TT_IPV6,
MLX5E_TT_ANY,
MLX5E_NUM_TT,
+ MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};
-#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
+enum {
+ MLX5E_STATE_ASYNC_EVENTS_ENABLED,
+ MLX5E_STATE_OPENED,
+ MLX5E_STATE_DESTROYING,
+};
-enum mlx5e_rqt_ix {
- MLX5E_INDIRECTION_RQT,
- MLX5E_SINGLE_RQ_RQT,
- MLX5E_NUM_RQT,
+struct mlx5e_vxlan_db {
+ spinlock_t lock; /* protect vxlan table */
+ struct radix_tree_root tree;
};
-struct mlx5e_eth_addr_info {
+struct mlx5e_l2_rule {
u8 addr[ETH_ALEN + 2];
- u32 tt_vec;
- struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
+ struct mlx5_flow_rule *rule;
};
-#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
-
-struct mlx5e_eth_addr_db {
- struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
- struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
- struct mlx5e_eth_addr_info broadcast;
- struct mlx5e_eth_addr_info allmulti;
- struct mlx5e_eth_addr_info promisc;
- bool broadcast_enabled;
- bool allmulti_enabled;
- bool promisc_enabled;
+struct mlx5e_flow_table {
+ int num_groups;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_group **g;
};
-enum {
- MLX5E_STATE_ASYNC_EVENTS_ENABLE,
- MLX5E_STATE_OPENED,
- MLX5E_STATE_DESTROYING,
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct mlx5e_tc_table {
+ struct mlx5_flow_table *t;
+
+ struct rhashtable_params ht_params;
+ struct rhashtable ht;
};
-struct mlx5e_vlan_db {
+struct mlx5e_vlan_table {
+ struct mlx5e_flow_table ft;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID];
struct mlx5_flow_rule *untagged_rule;
@@ -517,29 +444,74 @@ struct mlx5e_vlan_db {
bool filter_disabled;
};
-struct mlx5e_vxlan_db {
- spinlock_t lock; /* protect vxlan table */
- struct radix_tree_root tree;
+struct mlx5e_l2_table {
+ struct mlx5e_flow_table ft;
+ struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct mlx5e_l2_rule broadcast;
+ struct mlx5e_l2_rule allmulti;
+ struct mlx5e_l2_rule promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
};
-struct mlx5e_flow_table {
- int num_groups;
- struct mlx5_flow_table *t;
- struct mlx5_flow_group **g;
+/* L3/L4 traffic type classifier */
+struct mlx5e_ttc_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_rule *rules[MLX5E_NUM_TT];
};
-struct mlx5e_tc_flow_table {
- struct mlx5_flow_table *t;
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+struct arfs_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_rule *default_rule;
+ struct hlist_head rules_hash[ARFS_HASH_SIZE];
+};
- struct rhashtable_params ht_params;
- struct rhashtable ht;
+enum arfs_type {
+ ARFS_IPV4_TCP,
+ ARFS_IPV6_TCP,
+ ARFS_IPV4_UDP,
+ ARFS_IPV6_UDP,
+ ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+ struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+ /* Protect aRFS rules list */
+ spinlock_t arfs_lock;
+ struct list_head rules;
+ int last_filter_id;
+ struct workqueue_struct *wq;
};
-struct mlx5e_flow_tables {
- struct mlx5_flow_namespace *ns;
- struct mlx5e_tc_flow_table tc;
- struct mlx5e_flow_table vlan;
- struct mlx5e_flow_table main;
+/* NIC prio FTS */
+enum {
+ MLX5E_VLAN_FT_LEVEL = 0,
+ MLX5E_L2_FT_LEVEL,
+ MLX5E_TTC_FT_LEVEL,
+ MLX5E_ARFS_FT_LEVEL
+};
+
+struct mlx5e_flow_steering {
+ struct mlx5_flow_namespace *ns;
+ struct mlx5e_tc_table tc;
+ struct mlx5e_vlan_table vlan;
+ struct mlx5e_l2_table l2;
+ struct mlx5e_ttc_table ttc;
+ struct mlx5e_arfs_tables arfs;
+};
+
+struct mlx5e_direct_tir {
+ u32 tirn;
+ u32 rqtn;
+};
+
+enum {
+ MLX5E_TC_PRIO = 0,
+ MLX5E_NIC_PRIO
};
struct mlx5e_priv {
@@ -554,42 +526,30 @@ struct mlx5e_priv {
u32 pdn;
u32 tdn;
struct mlx5_core_mkey mkey;
+ struct mlx5_core_mkey umr_mkey;
struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
u32 tisn[MLX5E_MAX_NUM_TC];
- u32 rqtn[MLX5E_NUM_RQT];
- u32 tirn[MLX5E_NUM_TT];
+ u32 indir_rqtn;
+ u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_direct_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
- struct mlx5e_flow_tables fts;
- struct mlx5e_eth_addr_db eth_addr;
- struct mlx5e_vlan_db vlan;
-#ifdef CONFIG_MLX5_CORE_EN_VXLAN
+ struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
-#endif
struct mlx5e_params params;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
+ struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
struct mlx5e_tstamp tstamp;
-};
-
-#define MLX5E_NET_IP_ALIGN 2
-
-struct mlx5e_tx_wqe {
- struct mlx5_wqe_ctrl_seg ctrl;
- struct mlx5_wqe_eth_seg eth;
-};
-
-struct mlx5e_rx_wqe {
- struct mlx5_wqe_srq_next_seg next;
- struct mlx5_wqe_data_seg data;
+ u16 q_counter;
};
enum mlx5e_link_mode {
@@ -634,14 +594,39 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u16 byte_cnt,
+ struct mlx5e_mpw_info *wi,
+ struct sk_buff *skb);
+void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u16 byte_cnt,
+ struct mlx5e_mpw_info *wi,
+ struct sk_buff *skb);
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi);
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
void mlx5e_update_stats(struct mlx5e_priv *priv);
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
void mlx5e_set_rx_mode_work(struct work_struct *work);
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -650,6 +635,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
@@ -658,16 +644,20 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+ u32 *indirection_rqt, int len,
int num_channels);
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5e_tx_wqe *wqe, int bf_sz)
+ struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{
u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
@@ -681,9 +671,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
*/
wmb();
if (bf_sz)
- __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+ __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
else
- mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+ mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
/* flush the write-combining mapped buffer */
wmb();
@@ -704,12 +694,43 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
MLX5E_MAX_NUM_CHANNELS);
}
+static inline int mlx5e_get_mtt_octw(int npages)
+{
+ return ALIGN(npages, 8) / 2;
+}
+
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
#endif
+#ifndef CONFIG_RFS_ACCEL
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+ return -ENOTSUPP;
+}
+
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+ return -ENOTSUPP;
+}
+#else
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+#endif
+
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
new file mode 100644
index 000000000..3515e78ba
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -0,0 +1,752 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_RFS_ACCEL
+
+#include <linux/hash.h>
+#include <linux/mlx5/fs.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "en.h"
+
+struct arfs_tuple {
+ __be16 etype;
+ u8 ip_proto;
+ union {
+ __be32 src_ipv4;
+ struct in6_addr src_ipv6;
+ };
+ union {
+ __be32 dst_ipv4;
+ struct in6_addr dst_ipv6;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+struct arfs_rule {
+ struct mlx5e_priv *priv;
+ struct work_struct arfs_work;
+ struct mlx5_flow_rule *rule;
+ struct hlist_node hlist;
+ int rxq;
+ /* Flow ID passed to ndo_rx_flow_steer */
+ int flow_id;
+ /* Filter ID returned by ndo_rx_flow_steer */
+ int filter_id;
+ struct arfs_tuple tuple;
+};
+
+#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
+ for (i = 0; i < ARFS_NUM_TYPES; i++) \
+ mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)
+
+#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
+ for (j = 0; j < ARFS_HASH_SIZE; j++) \
+ hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)
+
+static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
+{
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ return MLX5E_TT_IPV4_TCP;
+ case ARFS_IPV4_UDP:
+ return MLX5E_TT_IPV4_UDP;
+ case ARFS_IPV6_TCP:
+ return MLX5E_TT_IPV6_TCP;
+ case ARFS_IPV6_UDP:
+ return MLX5E_TT_IPV6_UDP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int arfs_disable(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest;
+ u32 *tirn = priv->indir_tirn;
+ int err = 0;
+ int tt;
+ int i;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ dest.tir_num = tirn[i];
+ tt = arfs_get_tt(i);
+		/* Modify ttc rules destination to bypass the aRFS tables */
+ err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+ &dest);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc destination failed\n",
+ __func__);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void arfs_del_rules(struct mlx5e_priv *priv);
+
+int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+ arfs_del_rules(priv);
+
+ return arfs_disable(priv);
+}
+
+int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest;
+ int err = 0;
+ int tt;
+ int i;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
+ tt = arfs_get_tt(i);
+		/* Modify ttc rules destination to point to the aRFS FTs */
+ err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+ &dest);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc destination failed err=%d\n",
+ __func__, err);
+ arfs_disable(priv);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void arfs_destroy_table(struct arfs_table *arfs_t)
+{
+ mlx5_del_flow_rule(arfs_t->default_rule);
+ mlx5e_destroy_flow_table(&arfs_t->ft);
+}
+
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+{
+ int i;
+
+ if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ return;
+
+ arfs_del_rules(priv);
+ destroy_workqueue(priv->fs.arfs.wq);
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
+ arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
+ }
+}
+
+static int arfs_add_default_rule(struct mlx5e_priv *priv,
+ enum arfs_type type)
+{
+ struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+ struct mlx5_flow_destination dest;
+ u8 match_criteria_enable = 0;
+ u32 *tirn = priv->indir_tirn;
+ u32 *match_criteria;
+ u32 *match_value;
+ int err = 0;
+
+ match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!match_value || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
+ break;
+ case ARFS_IPV4_UDP:
+ dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
+ break;
+ case ARFS_IPV6_TCP:
+ dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
+ break;
+ case ARFS_IPV6_UDP:
+ dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, match_criteria_enable,
+ match_criteria, match_value,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ &dest);
+ if (IS_ERR(arfs_t->default_rule)) {
+ err = PTR_ERR(arfs_t->default_rule);
+ arfs_t->default_rule = NULL;
+ netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
+ __func__, type);
+ }
+out:
+ kvfree(match_criteria);
+ kvfree(match_value);
+ return err;
+}
+
+#define MLX5E_ARFS_NUM_GROUPS 2
+#define MLX5E_ARFS_GROUP1_SIZE BIT(12)
+#define MLX5E_ARFS_GROUP2_SIZE BIT(0)
+#define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\
+ MLX5E_ARFS_GROUP2_SIZE)
+static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ enum arfs_type type)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
+ sizeof(*ft->g), GFP_KERNEL);
+ in = mlx5_vzalloc(inlen);
+ if (!in || !ft->g) {
+ kvfree(ft->g);
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
+ outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ case ARFS_IPV6_TCP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
+ break;
+ case ARFS_IPV4_UDP:
+ case ARFS_IPV6_UDP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ case ARFS_IPV4_UDP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ break;
+ case ARFS_IPV6_TCP:
+ case ARFS_IPV6_UDP:
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xff, 16);
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff, 16);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_ARFS_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_ARFS_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+out:
+ kvfree(in);
+
+ return err;
+}
+
+static int arfs_create_table(struct mlx5e_priv *priv,
+ enum arfs_type type)
+{
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
+ int err;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+
+ err = arfs_create_groups(ft, type);
+ if (err)
+ goto err;
+
+ err = arfs_add_default_rule(priv, type);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mlx5e_destroy_flow_table(ft);
+ return err;
+}
+
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+ int err = 0;
+ int i;
+
+ if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ return 0;
+
+ spin_lock_init(&priv->fs.arfs.arfs_lock);
+ INIT_LIST_HEAD(&priv->fs.arfs.rules);
+ priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!priv->fs.arfs.wq)
+ return -ENOMEM;
+
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ err = arfs_create_table(priv, i);
+ if (err)
+ goto err;
+ }
+ return 0;
+err:
+ mlx5e_arfs_destroy_tables(priv);
+ return err;
+}
+
+#define MLX5E_ARFS_EXPIRY_QUOTA 60
+
+static void arfs_may_expire_flow(struct mlx5e_priv *priv)
+{
+ struct arfs_rule *arfs_rule;
+ struct hlist_node *htmp;
+ int quota = 0;
+ int i;
+ int j;
+
+ HLIST_HEAD(del_list);
+ spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+ if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+ break;
+ if (!work_pending(&arfs_rule->arfs_work) &&
+ rps_may_expire_flow(priv->netdev,
+ arfs_rule->rxq, arfs_rule->flow_id,
+ arfs_rule->filter_id)) {
+ hlist_del_init(&arfs_rule->hlist);
+ hlist_add_head(&arfs_rule->hlist, &del_list);
+ }
+ }
+ spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+ hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
+ if (arfs_rule->rule)
+ mlx5_del_flow_rule(arfs_rule->rule);
+ hlist_del(&arfs_rule->hlist);
+ kfree(arfs_rule);
+ }
+}
+
+static void arfs_del_rules(struct mlx5e_priv *priv)
+{
+ struct hlist_node *htmp;
+ struct arfs_rule *rule;
+ int i;
+ int j;
+
+ HLIST_HEAD(del_list);
+ spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+ hlist_del_init(&rule->hlist);
+ hlist_add_head(&rule->hlist, &del_list);
+ }
+ spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+ hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
+ cancel_work_sync(&rule->arfs_work);
+ if (rule->rule)
+ mlx5_del_flow_rule(rule->rule);
+ hlist_del(&rule->hlist);
+ kfree(rule);
+ }
+}
+
+static struct hlist_head *
+arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
+ __be16 dst_port)
+{
+ unsigned long l;
+ int bucket_idx;
+
+ l = (__force unsigned long)src_port |
+ ((__force unsigned long)dst_port << 2);
+
+ bucket_idx = hash_long(l, ARFS_HASH_SHIFT);
+
+ return &arfs_t->rules_hash[bucket_idx];
+}
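
arfs_hash_bucket() keys only on the two L4 ports, so distinct flows can share a bucket; arfs_find_rule() later disambiguates on the full tuple, addresses included. A runnable sketch of the bucket computation, with a golden-ratio multiply standing in for the kernel's hash_long():

#include <stdio.h>

#define ARFS_HASH_SHIFT 8
#define ARFS_HASH_SIZE  (1 << ARFS_HASH_SHIFT)

static unsigned int bucket(unsigned short sport, unsigned short dport)
{
	unsigned int l = (unsigned int)sport |
			 ((unsigned int)dport << 2);

	/* golden-ratio multiply as a stand-in for hash_long() */
	return (l * 0x9E3779B9u) >> (32 - ARFS_HASH_SHIFT);
}

int main(void)
{
	printf("bucket %u of %d\n", bucket(1234, 80), ARFS_HASH_SIZE);
	return 0;
}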
+
+static u8 arfs_get_ip_proto(const struct sk_buff *skb)
+{
+ return (skb->protocol == htons(ETH_P_IP)) ?
+ ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+}
+
+static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
+ u8 ip_proto, __be16 etype)
+{
+ if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
+ return &arfs->arfs_tables[ARFS_IPV4_TCP];
+ if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
+ return &arfs->arfs_tables[ARFS_IPV4_UDP];
+ if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
+ return &arfs->arfs_tables[ARFS_IPV6_TCP];
+ if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
+ return &arfs->arfs_tables[ARFS_IPV6_UDP];
+
+ return NULL;
+}
+
+static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
+ struct arfs_rule *arfs_rule)
+{
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct arfs_tuple *tuple = &arfs_rule->tuple;
+ struct mlx5_flow_rule *rule = NULL;
+ struct mlx5_flow_destination dest;
+ struct arfs_table *arfs_table;
+ u8 match_criteria_enable = 0;
+ struct mlx5_flow_table *ft;
+ u32 *match_criteria;
+ u32 *match_value;
+ int err = 0;
+
+ match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!match_value || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ntohs(tuple->etype));
+ arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
+ if (!arfs_table) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ ft = arfs_table->ft.t;
+ if (tuple->ip_proto == IPPROTO_TCP) {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.tcp_dport);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.tcp_sport);
+ MLX5_SET(fte_match_param, match_value, outer_headers.tcp_dport,
+ ntohs(tuple->dst_port));
+ MLX5_SET(fte_match_param, match_value, outer_headers.tcp_sport,
+ ntohs(tuple->src_port));
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.udp_dport);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.udp_sport);
+ MLX5_SET(fte_match_param, match_value, outer_headers.udp_dport,
+ ntohs(tuple->dst_port));
+ MLX5_SET(fte_match_param, match_value, outer_headers.udp_sport,
+ ntohs(tuple->src_port));
+ }
+ if (tuple->etype == htons(ETH_P_IP)) {
+ memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &tuple->src_ipv4,
+ 4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &tuple->dst_ipv4,
+ 4);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ } else {
+ memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &tuple->src_ipv6,
+ 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &tuple->dst_ipv6,
+ 16);
+ memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xff,
+ 16);
+ memset(MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff,
+ 16);
+ }
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
+ rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
+ match_value, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ &dest);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
+ __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+ }
+
+out:
+ kvfree(match_criteria);
+ kvfree(match_value);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
+ struct mlx5_flow_rule *rule, u16 rxq)
+{
+ struct mlx5_flow_destination dst;
+ int err = 0;
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dst.tir_num = priv->direct_tir[rxq].tirn;
+ err = mlx5_modify_rule_destination(rule, &dst);
+ if (err)
+		netdev_warn(priv->netdev,
+			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
+}
+
+static void arfs_handle_work(struct work_struct *work)
+{
+ struct arfs_rule *arfs_rule = container_of(work,
+ struct arfs_rule,
+ arfs_work);
+ struct mlx5e_priv *priv = arfs_rule->priv;
+ struct mlx5_flow_rule *rule;
+
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ hlist_del(&arfs_rule->hlist);
+ spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+ mutex_unlock(&priv->state_lock);
+ kfree(arfs_rule);
+ goto out;
+ }
+ mutex_unlock(&priv->state_lock);
+
+ if (!arfs_rule->rule) {
+ rule = arfs_add_rule(priv, arfs_rule);
+ if (IS_ERR(rule))
+ goto out;
+ arfs_rule->rule = rule;
+ } else {
+ arfs_modify_rule_rq(priv, arfs_rule->rule,
+ arfs_rule->rxq);
+ }
+out:
+ arfs_may_expire_flow(priv);
+}
+
+/* return L4 destination port from ip4/6 packets */
+static __be16 arfs_get_dst_port(const struct sk_buff *skb)
+{
+ char *transport_header;
+
+ transport_header = skb_transport_header(skb);
+ if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+ return ((struct tcphdr *)transport_header)->dest;
+ return ((struct udphdr *)transport_header)->dest;
+}
+
+/* return L4 source port from ip4/6 packets */
+static __be16 arfs_get_src_port(const struct sk_buff *skb)
+{
+ char *transport_header;
+
+ transport_header = skb_transport_header(skb);
+ if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+ return ((struct tcphdr *)transport_header)->source;
+ return ((struct udphdr *)transport_header)->source;
+}
+
+static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+ struct arfs_table *arfs_t,
+ const struct sk_buff *skb,
+ u16 rxq, u32 flow_id)
+{
+ struct arfs_rule *rule;
+ struct arfs_tuple *tuple;
+
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
+ if (!rule)
+ return NULL;
+
+ rule->priv = priv;
+ rule->rxq = rxq;
+ INIT_WORK(&rule->arfs_work, arfs_handle_work);
+
+ tuple = &rule->tuple;
+ tuple->etype = skb->protocol;
+ if (tuple->etype == htons(ETH_P_IP)) {
+ tuple->src_ipv4 = ip_hdr(skb)->saddr;
+ tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+ } else {
+ memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+ sizeof(struct in6_addr));
+ }
+ tuple->ip_proto = arfs_get_ip_proto(skb);
+ tuple->src_port = arfs_get_src_port(skb);
+ tuple->dst_port = arfs_get_dst_port(skb);
+
+ rule->flow_id = flow_id;
+ rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+
+ hlist_add_head(&rule->hlist,
+ arfs_hash_bucket(arfs_t, tuple->src_port,
+ tuple->dst_port));
+ return rule;
+}
+
+static bool arfs_cmp_ips(struct arfs_tuple *tuple,
+ const struct sk_buff *skb)
+{
+ if (tuple->etype == htons(ETH_P_IP) &&
+ tuple->src_ipv4 == ip_hdr(skb)->saddr &&
+ tuple->dst_ipv4 == ip_hdr(skb)->daddr)
+ return true;
+ if (tuple->etype == htons(ETH_P_IPV6) &&
+ (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr))) &&
+ (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+ sizeof(struct in6_addr))))
+ return true;
+ return false;
+}
+
+static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
+ const struct sk_buff *skb)
+{
+ struct arfs_rule *arfs_rule;
+ struct hlist_head *head;
+ __be16 src_port = arfs_get_src_port(skb);
+ __be16 dst_port = arfs_get_dst_port(skb);
+
+ head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+ hlist_for_each_entry(arfs_rule, head, hlist) {
+ if (arfs_rule->tuple.src_port == src_port &&
+ arfs_rule->tuple.dst_port == dst_port &&
+ arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+ return arfs_rule;
+ }
+ }
+
+ return NULL;
+}
+
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct arfs_table *arfs_t;
+ struct arfs_rule *arfs_rule;
+
+ if (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+ if (!arfs_t)
+ return -EPROTONOSUPPORT;
+
+ spin_lock_bh(&arfs->arfs_lock);
+ arfs_rule = arfs_find_rule(arfs_t, skb);
+ if (arfs_rule) {
+ if (arfs_rule->rxq == rxq_index) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return arfs_rule->filter_id;
+ }
+ arfs_rule->rxq = rxq_index;
+ } else {
+ arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
+ rxq_index, flow_id);
+ if (!arfs_rule) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -ENOMEM;
+ }
+ }
+ queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
+ spin_unlock_bh(&arfs->arfs_lock);
+ return arfs_rule->filter_id;
+}
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 2018eebe1..847a8f3ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -93,6 +93,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
+ /* Reset CQE compression to Admin default */
+ mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_admin);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -108,6 +110,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Disable CQE compression */
+ mlx5e_modify_rx_cqe_compression(priv, false);
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 3036f279a..c585349e0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
- tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+ tc_tx_bw[i] = ets->tc_tx_bw[i];
break;
}
}
@@ -140,8 +140,12 @@ static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ if (!ets->tc_tx_bw[i])
+ return -EINVAL;
+
bw_sum += ets->tc_tx_bw[i];
+ }
}
if (bw_sum != 0 && bw_sum != 100)
@@ -174,8 +178,14 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ int i;
pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
+ pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
+ }
return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3476ab844..e667a870e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -165,26 +165,112 @@ static const struct {
},
};
+static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 pfc_en_tx;
+ u8 pfc_en_rx;
+ int err;
+
+ err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
+
+ return err ? 0 : pfc_en_tx | pfc_en_rx;
+}
+
+#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
+#define MLX5E_NUM_RQ_STATS(priv) \
+ (NUM_RQ_STATS * priv->params.num_channels * \
+ test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_SQ_STATS(priv) \
+ (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
+ test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+ (hweight8(mlx5e_query_pfc_combined(priv)) * \
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS)
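/*
 * Worked example (illustrative numbers): with the netdev open, 4 channels,
 * 2 TCs and PFC enabled on priorities 0 and 3 (pfc_combined == 0x09, so
 * hweight8() == 2), the helpers above contribute
 *	4 * NUM_RQ_STATS + 4 * 2 * NUM_SQ_STATS +
 *	2 * NUM_PPORT_PER_PRIO_PFC_COUNTERS
 * entries. mlx5e_get_sset_count() below and the string/value fill routines
 * must stay in exact agreement on this count and ordering.
 */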
+
static int mlx5e_get_sset_count(struct net_device *dev, int sset)
{
struct mlx5e_priv *priv = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
- priv->params.num_channels * NUM_RQ_STATS +
- priv->params.num_channels * priv->params.num_tc *
- NUM_SQ_STATS;
+ return NUM_SW_COUNTERS +
+ MLX5E_NUM_Q_CNTRS(priv) +
+ NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+ MLX5E_NUM_RQ_STATS(priv) +
+ MLX5E_NUM_SQ_STATS(priv) +
+ MLX5E_NUM_PFC_COUNTERS(priv);
/* fallthrough */
default:
return -EOPNOTSUPP;
}
}
+static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ int i, j, tc, prio, idx = 0;
+ unsigned long pfc_combined;
+
+ /* SW counters */
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
+
+ /* Q counters */
+ for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+
+ /* VPORT counters */
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_stats_desc[i].format);
+
+ /* PPORT counters */
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_802_3_stats_desc[i].format);
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_2863_stats_desc[i].format);
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_2819_stats_desc[i].format);
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_traffic_stats_desc[i].format, prio);
+ }
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, prio);
+ }
+ }
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return;
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_stats_desc[j].format, i);
+
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ sq_stats_desc[j].format,
+ priv->channeltc_to_txq_map[i][tc]);
+}
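/*
 * The ethtool strings buffer is a flat array of fixed-size slots: entry i
 * lives at data + i * ETH_GSTRING_LEN (32 bytes), which is why the fill
 * order here must match mlx5e_get_ethtool_stats() below one-for-one.
 */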
+
static void mlx5e_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
- int i, j, tc, idx = 0;
struct mlx5e_priv *priv = netdev_priv(dev);
switch (stringset) {
@@ -195,30 +281,7 @@ static void mlx5e_get_strings(struct net_device *dev,
break;
case ETH_SS_STATS:
- /* VPORT counters */
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_strings[i]);
-
- /* PPORT counters */
- for (i = 0; i < NUM_PPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_strings[i]);
-
- /* per channel counters */
- for (i = 0; i < priv->params.num_channels; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- "rx%d_%s", i, rq_stats_strings[j]);
-
- for (tc = 0; tc < priv->params.num_tc; tc++)
- for (i = 0; i < priv->params.num_channels; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- sprintf(data +
- (idx++) * ETH_GSTRING_LEN,
- "tx%d_%s",
- priv->channeltc_to_txq_map[i][tc],
- sq_stats_strings[j]);
+ mlx5e_fill_stats_strings(priv, data);
break;
}
}
@@ -227,7 +290,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int i, j, tc, idx = 0;
+ int i, j, tc, prio, idx = 0;
+ unsigned long pfc_combined;
if (!data)
return;
@@ -237,33 +301,68 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
mlx5e_update_stats(priv);
mutex_unlock(&priv->state_lock);
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_stats_desc, i);
+
+ for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ q_stats_desc, i);
+
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- data[idx++] = ((u64 *)&priv->stats.vport)[i];
+ data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_stats_desc, i);
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
+ pport_802_3_stats_desc, i);
- for (i = 0; i < NUM_PPORT_COUNTERS; i++)
- data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
+ pport_2863_stats_desc, i);
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
+ pport_2819_stats_desc, i);
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_traffic_stats_desc, i);
+ }
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return;
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
- data[idx++] = !test_bit(MLX5E_STATE_OPENED,
- &priv->state) ? 0 :
- ((u64 *)&priv->channel[i]->rq.stats)[j];
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
+ rq_stats_desc, j);
for (tc = 0; tc < priv->params.num_tc; tc++)
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] = !test_bit(MLX5E_STATE_OPENED,
- &priv->state) ? 0 :
- ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
+ sq_stats_desc, j);
}
static void mlx5e_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int rq_wq_type = priv->params.rq_wq_type;
- param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
param->rx_pending = 1 << priv->params.log_rq_size;
param->tx_pending = 1 << priv->params.log_sq_size;
@@ -274,6 +373,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
bool was_opened;
+ int rq_wq_type = priv->params.rq_wq_type;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
@@ -289,16 +389,16 @@ static int mlx5e_set_ringparam(struct net_device *dev,
__func__);
return -EINVAL;
}
- if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ 1 << mlx5_min_log_rq_size(rq_wq_type));
return -EINVAL;
}
- if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+ if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
return -EINVAL;
}
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
@@ -316,8 +416,7 @@ static int mlx5e_set_ringparam(struct net_device *dev,
log_rq_size = order_base_2(param->rx_pending);
log_sq_size = order_base_2(param->tx_pending);
- min_rx_wqes = min_t(u16, param->rx_pending - 1,
- MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+ min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
if (log_rq_size == priv->params.log_rq_size &&
log_sq_size == priv->params.log_sq_size &&
@@ -357,6 +456,7 @@ static int mlx5e_set_channels(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
int ncv = mlx5e_get_max_num_channels(priv->mdev);
unsigned int count = ch->combined_count;
+ bool arfs_enabled;
bool was_opened;
int err = 0;
@@ -385,13 +485,27 @@ static int mlx5e_set_channels(struct net_device *dev,
if (was_opened)
mlx5e_close_locked(dev);
+ arfs_enabled = dev->features & NETIF_F_NTUPLE;
+ if (arfs_enabled)
+ mlx5e_arfs_disable(priv);
+
priv->params.num_channels = count;
- mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, count);
if (was_opened)
err = mlx5e_open_locked(dev);
+ if (err)
+ goto out;
+
+ if (arfs_enabled) {
+ err = mlx5e_arfs_enable(priv);
+ if (err)
+ netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
+ __func__, err);
+ }
+out:
mutex_unlock(&priv->state_lock);
return err;
@@ -499,6 +613,25 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
return 0;
}
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+ u32 max_speed = 0;
+ u32 proto_cap;
+ int err;
+ int i;
+
+ err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
+ if (proto_cap & MLX5E_PROT_MASK(i))
+ max_speed = max(max_speed, ptys2ethtool_table[i].speed);
+
+ *speed = max_speed;
+ return 0;
+}
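/*
 * Example (illustrative): if proto_cap advertises both 10GbE and 100GbE
 * PTYS modes, the loop above reports *speed == 100000, since ethtool link
 * speeds are expressed in Mb/s.
 */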
+
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper,
struct ethtool_cmd *cmd)
@@ -727,9 +860,8 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
mlx5e_build_tir_ctx_hash(tirc, priv);
- for (i = 0; i < MLX5E_NUM_TT; i++)
- if (IS_HASHING_TT(i))
- mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
+ for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+ mlx5_core_modify_tir(mdev, priv->indir_tirn[i], in, inlen);
}
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -751,9 +883,11 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mutex_lock(&priv->state_lock);
if (indir) {
+ u32 rqtn = priv->indir_rqtn;
+
memcpy(priv->params.indirection_rqt, indir,
sizeof(priv->params.indirection_rqt));
- mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
}
if (key)
@@ -1036,6 +1170,108 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
+static int mlx5e_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 beacon_duration;
+
+ if (!MLX5_CAP_GEN(mdev, beacon_led))
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ beacon_duration = MLX5_BEACON_DURATION_INF;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ beacon_duration = MLX5_BEACON_DURATION_OFF;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return mlx5_set_port_beacon(mdev, beacon_duration);
+}
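/*
 * Typical trigger (assumed tooling): `ethtool --identify <iface>` makes the
 * core call this with ETHTOOL_ID_ACTIVE; a zero return tells the core the
 * device blinks on its own, and ETHTOOL_ID_INACTIVE disarms the beacon LED
 * when the identify period ends.
 */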
+
+static int mlx5e_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *dev = priv->mdev;
+ int size_read = 0;
+ u8 data[4];
+
+ size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
+ if (size_read < 2)
+ return -EIO;
+
+ /* data[0] = identifier byte */
+ switch (data[0]) {
+ case MLX5_MODULE_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case MLX5_MODULE_ID_QSFP_PLUS:
+ case MLX5_MODULE_ID_QSFP28:
+ /* data[1] = revision id */
+ if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+ case MLX5_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
+ __func__, data[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mlx5e_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int offset = ee->offset;
+ int size_read;
+ int i = 0;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ while (i < ee->len) {
+ size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
+ data + i);
+
+ if (!size_read)
+ /* Done reading */
+ return 0;
+
+ if (size_read < 0) {
+ netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
+ __func__, size_read);
+ return 0;
+ }
+
+ i += size_read;
+ offset += size_read;
+ }
+
+ return 0;
+}
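/*
 * Userspace view (assumed tooling): `ethtool -m <iface>` first calls
 * get_module_info() above to size the EEPROM map, then reads it through
 * this handler in whatever chunk sizes mlx5_query_module_eeprom() returns.
 */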
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1060,6 +1296,9 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_pauseparam = mlx5e_get_pauseparam,
.set_pauseparam = mlx5e_set_pauseparam,
.get_ts_info = mlx5e_get_ts_info,
+ .set_phys_id = mlx5e_set_phys_id,
.get_wol = mlx5e_get_wol,
.set_wol = mlx5e_set_wol,
+ .get_module_info = mlx5e_get_module_info,
+ .get_module_eeprom = mlx5e_get_module_eeprom,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index d00a24203..b32740092 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -37,7 +37,10 @@
#include <linux/mlx5/fs.h>
#include "en.h"
-#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai, int type);
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai);
enum {
MLX5E_FULLMATCH = 0,
@@ -58,21 +61,21 @@ enum {
MLX5E_ACTION_DEL = 2,
};
-struct mlx5e_eth_addr_hash_node {
+struct mlx5e_l2_hash_node {
struct hlist_node hlist;
u8 action;
- struct mlx5e_eth_addr_info ai;
+ struct mlx5e_l2_rule ai;
};
-static inline int mlx5e_hash_eth_addr(u8 *addr)
+static inline int mlx5e_hash_l2(u8 *addr)
{
return addr[5];
}
-static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
- struct mlx5e_eth_addr_hash_node *hn;
- int ix = mlx5e_hash_eth_addr(addr);
+ struct mlx5e_l2_hash_node *hn;
+ int ix = mlx5e_hash_l2(addr);
int found = 0;
hlist_for_each_entry(hn, &hash[ix], hlist)
@@ -96,371 +99,12 @@ static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
hlist_add_head(&hn->hlist, &hash[ix]);
}
-static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
hlist_del(&hn->hlist);
kfree(hn);
}
-static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_info *ai)
-{
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_ANY))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
-}
-
-static int mlx5e_get_eth_addr_type(u8 *addr)
-{
- if (is_unicast_ether_addr(addr))
- return MLX5E_UC;
-
- if ((addr[0] == 0x01) &&
- (addr[1] == 0x00) &&
- (addr[2] == 0x5e) &&
- !(addr[3] & 0x80))
- return MLX5E_MC_IPV4;
-
- if ((addr[0] == 0x33) &&
- (addr[1] == 0x33))
- return MLX5E_MC_IPV6;
-
- return MLX5E_MC_OTHER;
-}
-
-static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
-{
- int eth_addr_type;
- u32 ret;
-
- switch (type) {
- case MLX5E_FULLMATCH:
- eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
- switch (eth_addr_type) {
- case MLX5E_UC:
- ret =
- BIT(MLX5E_TT_IPV4_TCP) |
- BIT(MLX5E_TT_IPV6_TCP) |
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV4_IPSEC_AH) |
- BIT(MLX5E_TT_IPV6_IPSEC_AH) |
- BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV4) |
- BIT(MLX5E_TT_IPV6) |
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
-
- case MLX5E_MC_IPV4:
- ret =
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV4) |
- 0;
- break;
-
- case MLX5E_MC_IPV6:
- ret =
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV6) |
- 0;
- break;
-
- case MLX5E_MC_OTHER:
- ret =
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
- }
-
- break;
-
- case MLX5E_ALLMULTI:
- ret =
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV4) |
- BIT(MLX5E_TT_IPV6) |
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
-
- default: /* MLX5E_PROMISC */
- ret =
- BIT(MLX5E_TT_IPV4_TCP) |
- BIT(MLX5E_TT_IPV6_TCP) |
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV4_IPSEC_AH) |
- BIT(MLX5E_TT_IPV6_IPSEC_AH) |
- BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV4) |
- BIT(MLX5E_TT_IPV6) |
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
- }
-
- return ret;
-}
-
-static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_info *ai,
- int type, u32 *mc, u32 *mv)
-{
- struct mlx5_flow_destination dest;
- u8 match_criteria_enable = 0;
- struct mlx5_flow_rule **rule_p;
- struct mlx5_flow_table *ft = priv->fts.main.t;
- u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
- outer_headers.dmac_47_16);
- u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
- outer_headers.dmac_47_16);
- u32 *tirn = priv->tirn;
- u32 tt_vec;
- int err = 0;
-
- dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-
- switch (type) {
- case MLX5E_FULLMATCH:
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- eth_broadcast_addr(mc_dmac);
- ether_addr_copy(mv_dmac, ai->addr);
- break;
-
- case MLX5E_ALLMULTI:
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- mc_dmac[0] = 0x01;
- mv_dmac[0] = 0x01;
- break;
-
- case MLX5E_PROMISC:
- break;
- }
-
- tt_vec = mlx5e_get_tt_vec(ai, type);
-
- if (tt_vec & BIT(MLX5E_TT_ANY)) {
- rule_p = &ai->ft_rule[MLX5E_TT_ANY];
- dest.tir_num = tirn[MLX5E_TT_ANY];
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_ANY);
- }
-
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
- dest.tir_num = tirn[MLX5E_TT_IPV4];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
- dest.tir_num = tirn[MLX5E_TT_IPV6];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6);
- }
-
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
- dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
- dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
- }
-
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
- dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
- dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
-
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
- }
-
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
- dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
- dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
- }
-
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
- dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
- dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
- }
-
- return 0;
-
-err_del_ai:
- err = PTR_ERR(*rule_p);
- *rule_p = NULL;
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
-
- return err;
-}
-
-static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_info *ai, int type)
-{
- u32 *match_criteria;
- u32 *match_value;
- int err = 0;
-
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto add_eth_addr_rule_out;
- }
-
- err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
- match_value);
-
-add_eth_addr_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
-
- return err;
-}
-
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
struct net_device *ndev = priv->netdev;
@@ -472,7 +116,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
+ for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
list_size++;
max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -489,7 +133,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
@@ -514,28 +158,28 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
u16 vid, u32 *mc, u32 *mv)
{
- struct mlx5_flow_table *ft = priv->fts.vlan.t;
+ struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
u8 match_criteria_enable = 0;
struct mlx5_flow_rule **rule_p;
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fts.main.t;
+ dest.ft = priv->fs.l2.ft.t;
match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- rule_p = &priv->vlan.untagged_rule;
+ rule_p = &priv->fs.vlan.untagged_rule;
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- rule_p = &priv->vlan.any_vlan_rule;
+ rule_p = &priv->fs.vlan.any_vlan_rule;
MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
- rule_p = &priv->vlan.active_vlans_rule[vid];
+ rule_p = &priv->fs.vlan.active_vlans_rule[vid];
MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
@@ -589,22 +233,22 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- if (priv->vlan.untagged_rule) {
- mlx5_del_flow_rule(priv->vlan.untagged_rule);
- priv->vlan.untagged_rule = NULL;
+ if (priv->fs.vlan.untagged_rule) {
+ mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
+ priv->fs.vlan.untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- if (priv->vlan.any_vlan_rule) {
- mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
- priv->vlan.any_vlan_rule = NULL;
+ if (priv->fs.vlan.any_vlan_rule) {
+ mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
+ priv->fs.vlan.any_vlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
mlx5e_vport_context_update_vlans(priv);
- if (priv->vlan.active_vlans_rule[vid]) {
- mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
- priv->vlan.active_vlans_rule[vid] = NULL;
+ if (priv->fs.vlan.active_vlans_rule[vid]) {
+ mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
+ priv->fs.vlan.active_vlans_rule[vid] = NULL;
}
mlx5e_vport_context_update_vlans(priv);
break;
@@ -613,10 +257,10 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->vlan.filter_disabled)
+ if (!priv->fs.vlan.filter_disabled)
return;
- priv->vlan.filter_disabled = false;
+ priv->fs.vlan.filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
@@ -624,10 +268,10 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
- if (priv->vlan.filter_disabled)
+ if (priv->fs.vlan.filter_disabled)
return;
- priv->vlan.filter_disabled = true;
+ priv->fs.vlan.filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
@@ -638,7 +282,7 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- set_bit(vid, priv->vlan.active_vlans);
+ set_bit(vid, priv->fs.vlan.active_vlans);
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
@@ -648,7 +292,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- clear_bit(vid, priv->vlan.active_vlans);
+ clear_bit(vid, priv->fs.vlan.active_vlans);
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
@@ -656,21 +300,21 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
- for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+ for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-static void mlx5e_execute_action(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+ struct mlx5e_l2_hash_node *hn)
{
switch (hn->action) {
case MLX5E_ACTION_ADD:
- mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
hn->action = MLX5E_ACTION_NONE;
break;
case MLX5E_ACTION_DEL:
- mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
- mlx5e_del_eth_addr_from_hash(hn);
+ mlx5e_del_l2_flow_rule(priv, &hn->ai);
+ mlx5e_del_l2_from_hash(hn);
break;
}
}
@@ -682,14 +326,14 @@ static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
netif_addr_lock_bh(netdev);
- mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
- priv->netdev->dev_addr);
+ mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
+ priv->netdev->dev_addr);
netdev_for_each_uc_addr(ha, netdev)
- mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+ mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
netdev_for_each_mc_addr(ha, netdev)
- mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+ mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
netif_addr_unlock_bh(netdev);
}
@@ -699,17 +343,17 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
struct net_device *ndev = priv->netdev;
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
struct hlist_head *addr_list;
struct hlist_node *tmp;
int i = 0;
int hi;
- addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+ addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
if (is_uc) /* Make sure our own address is pushed first */
ether_addr_copy(addr_array[i++], ndev->dev_addr);
- else if (priv->eth_addr.broadcast_enabled)
+ else if (priv->fs.l2.broadcast_enabled)
ether_addr_copy(addr_array[i++], ndev->broadcast);
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -725,7 +369,7 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int list_type)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
u8 (*addr_array)[ETH_ALEN] = NULL;
struct hlist_head *addr_list;
struct hlist_node *tmp;
@@ -734,12 +378,12 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int err;
int hi;
- size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
+ size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
max_size = is_uc ?
1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
- addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+ addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
size++;
@@ -770,7 +414,7 @@ out:
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
- struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct mlx5e_l2_table *ea = &priv->fs.l2;
mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
@@ -781,26 +425,26 @@ static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
- mlx5e_execute_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+ mlx5e_execute_l2_action(priv, hn);
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
- mlx5e_execute_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+ mlx5e_execute_l2_action(priv, hn);
}
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
hn->action = MLX5E_ACTION_DEL;
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
@@ -814,7 +458,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
set_rx_mode_work);
- struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct mlx5e_l2_table *ea = &priv->fs.l2;
struct net_device *ndev = priv->netdev;
bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
@@ -830,27 +474,27 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
if (enable_promisc) {
- mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
- if (!priv->vlan.filter_disabled)
+ mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (!priv->fs.vlan.filter_disabled)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
0);
}
if (enable_allmulti)
- mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast)
- mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
mlx5e_handle_netdev_addr(priv);
if (disable_broadcast)
- mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+ mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
if (disable_allmulti)
- mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+ mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) {
- if (!priv->vlan.filter_disabled)
+ if (!priv->fs.vlan.filter_disabled)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
0);
- mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+ mlx5e_del_l2_flow_rule(priv, &ea->promisc);
}
ea->promisc_enabled = promisc_enabled;
@@ -872,224 +516,454 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
ft->num_groups = 0;
}
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
- ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+ ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}
-#define MLX5E_MAIN_GROUP0_SIZE BIT(3)
-#define MLX5E_MAIN_GROUP1_SIZE BIT(1)
-#define MLX5E_MAIN_GROUP2_SIZE BIT(0)
-#define MLX5E_MAIN_GROUP3_SIZE BIT(14)
-#define MLX5E_MAIN_GROUP4_SIZE BIT(13)
-#define MLX5E_MAIN_GROUP5_SIZE BIT(11)
-#define MLX5E_MAIN_GROUP6_SIZE BIT(2)
-#define MLX5E_MAIN_GROUP7_SIZE BIT(1)
-#define MLX5E_MAIN_GROUP8_SIZE BIT(0)
-#define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\
- MLX5E_MAIN_GROUP1_SIZE +\
- MLX5E_MAIN_GROUP2_SIZE +\
- MLX5E_MAIN_GROUP3_SIZE +\
- MLX5E_MAIN_GROUP4_SIZE +\
- MLX5E_MAIN_GROUP5_SIZE +\
- MLX5E_MAIN_GROUP6_SIZE +\
- MLX5E_MAIN_GROUP7_SIZE +\
- MLX5E_MAIN_GROUP8_SIZE)
-
-static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
- int inlen)
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
- u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
- match_criteria.outer_headers.dmac_47_16);
+ mlx5e_destroy_groups(ft);
+ kfree(ft->g);
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
+}
+
+static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ if (!IS_ERR_OR_NULL(ttc->rules[i])) {
+ mlx5_del_flow_rule(ttc->rules[i]);
+ ttc->rules[i] = NULL;
+ }
+ }
+}
+
+static struct {
+ u16 etype;
+ u8 proto;
+} ttc_rules[] = {
+ [MLX5E_TT_IPV4_TCP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_TCP,
+ },
+ [MLX5E_TT_IPV6_TCP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_TCP,
+ },
+ [MLX5E_TT_IPV4_UDP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_UDP,
+ },
+ [MLX5E_TT_IPV6_UDP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_UDP,
+ },
+ [MLX5E_TT_IPV4_IPSEC_AH] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_AH,
+ },
+ [MLX5E_TT_IPV6_IPSEC_AH] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_AH,
+ },
+ [MLX5E_TT_IPV4_IPSEC_ESP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_ESP,
+ },
+ [MLX5E_TT_IPV6_IPSEC_ESP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_ESP,
+ },
+ [MLX5E_TT_IPV4] = {
+ .etype = ETH_P_IP,
+ .proto = 0,
+ },
+ [MLX5E_TT_IPV6] = {
+ .etype = ETH_P_IPV6,
+ .proto = 0,
+ },
+ [MLX5E_TT_ANY] = {
+ .etype = 0,
+ .proto = 0,
+ },
+};
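/*
 * Mapping example: an IPv4/TCP frame matches the { ETH_P_IP, IPPROTO_TCP }
 * entry and is steered to the MLX5E_TT_IPV4_TCP TIR; a frame matching
 * neither an ethertype nor an IP protocol entry falls through to the
 * MLX5E_TT_ANY catch-all.
 */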
+
+static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_destination *dest,
+ u16 etype,
+ u8 proto)
+{
+ struct mlx5_flow_rule *rule;
+ u8 match_criteria_enable = 0;
+ u32 *match_criteria;
+ u32 *match_value;
+ int err = 0;
+
+ match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!match_value || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (proto) {
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol, proto);
+ }
+ if (etype) {
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype, etype);
+ }
+
+ rule = mlx5_add_flow_rule(ft, match_criteria_enable,
+ match_criteria, match_value,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ dest);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+ }
+out:
+ kvfree(match_criteria);
+ kvfree(match_value);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5e_ttc_table *ttc;
+ struct mlx5_flow_rule **rules;
+ struct mlx5_flow_table *ft;
+ int tt;
int err;
+
+ ttc = &priv->fs.ttc;
+ ft = ttc->ft.t;
+ rules = ttc->rules;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+ if (tt == MLX5E_TT_ANY)
+ dest.tir_num = priv->direct_tir[0].tirn;
+ else
+ dest.tir_num = priv->indir_tirn[tt];
+ rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
+ ttc_rules[tt].etype,
+ ttc_rules[tt].proto);
+ if (IS_ERR(rules[tt]))
+ goto del_rules;
+ }
+
+ return 0;
+
+del_rules:
+ err = PTR_ERR(rules[tt]);
+ rules[tt] = NULL;
+ mlx5e_cleanup_ttc_rules(ttc);
+ return err;
+}
+
+#define MLX5E_TTC_NUM_GROUPS 3
+#define MLX5E_TTC_GROUP1_SIZE BIT(3)
+#define MLX5E_TTC_GROUP2_SIZE BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE BIT(0)
+#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
+ MLX5E_TTC_GROUP2_SIZE +\
+ MLX5E_TTC_GROUP3_SIZE)
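/*
 * Size check: BIT(3) + BIT(1) + BIT(0) = 8 + 2 + 1 = 11 slots, one per
 * ttc_rules[] traffic type (8 L4 rules, 2 L3 rules, 1 catch-all), so the
 * table is sized exactly for its rule set.
 */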
+static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_flow_table *ft = &ttc->ft;
int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP0_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
- ft->num_groups++;
+ ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
+ sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ /* L4 Group */
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP1_SIZE;
+ ix += MLX5E_TTC_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
+ goto err;
ft->num_groups++;
- memset(in, 0, inlen);
+ /* L3 Group */
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP2_SIZE;
+ ix += MLX5E_TTC_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
+ goto err;
ft->num_groups++;
+ /* Any Group */
memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- eth_broadcast_addr(dmac);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP3_SIZE;
+ ix += MLX5E_TTC_GROUP3_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
+ goto err;
ft->num_groups++;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- eth_broadcast_addr(dmac);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP4_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
- ft->num_groups++;
+ kvfree(in);
+ return 0;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- eth_broadcast_addr(dmac);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP5_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
- ft->num_groups++;
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- dmac[0] = 0x01;
+ return err;
+}
+
+static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+
+ mlx5e_cleanup_ttc_rules(ttc);
+ mlx5e_destroy_flow_table(&ttc->ft);
+}
+
+static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+ struct mlx5e_flow_table *ft = &ttc->ft;
+ int err;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+
+ err = mlx5e_create_ttc_table_groups(ttc);
+ if (err)
+ goto err;
+
+ err = mlx5e_generate_ttc_table_rules(priv);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mlx5e_destroy_flow_table(ft);
+ return err;
+}
+
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai)
+{
+ if (!IS_ERR_OR_NULL(ai->rule)) {
+ mlx5_del_flow_rule(ai->rule);
+ ai->rule = NULL;
+ }
+}
+
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai, int type)
+{
+ struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_destination dest;
+ u8 match_criteria_enable = 0;
+ u32 *match_criteria;
+ u32 *match_value;
+ int err = 0;
+ u8 *mc_dmac;
+ u8 *mv_dmac;
+
+ match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!match_value || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_l2_rule_out;
+ }
+
+ mc_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ mv_dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dmac_47_16);
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.ttc.ft.t;
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ eth_broadcast_addr(mc_dmac);
+ ether_addr_copy(mv_dmac, ai->addr);
+ break;
+
+ case MLX5E_ALLMULTI:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ mc_dmac[0] = 0x01;
+ mv_dmac[0] = 0x01;
+ break;
+
+ case MLX5E_PROMISC:
+ break;
+ }
+
+ ai->rule = mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
+ match_value,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR(ai->rule)) {
+ netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
+ __func__, mv_dmac);
+ err = PTR_ERR(ai->rule);
+ ai->rule = NULL;
+ }
+
+add_l2_rule_out:
+ kvfree(match_criteria);
+ kvfree(match_value);
+
+ return err;
+}
+
+#define MLX5E_NUM_L2_GROUPS 3
+#define MLX5E_L2_GROUP1_SIZE BIT(0)
+#define MLX5E_L2_GROUP2_SIZE BIT(15)
+#define MLX5E_L2_GROUP3_SIZE BIT(0)
+#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
+ MLX5E_L2_GROUP2_SIZE +\
+ MLX5E_L2_GROUP3_SIZE)
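/*
 * Layout: one slot for the promiscuous rule (BIT(0)), BIT(15) = 32768
 * full-DMAC-match slots for unicast/broadcast entries, and one slot for
 * the allmulti rule, matching the three flow groups created below.
 */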
+static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_flow_table *ft = &l2_table->ft;
+ int ix = 0;
+ u8 *mc_dmac;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
+ outer_headers.dmac_47_16);
+ /* Flow Group for promiscuous */
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP6_SIZE;
+ ix += MLX5E_L2_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
- memset(in, 0, inlen);
+ /* Flow Group for full match */
+ eth_broadcast_addr(mc_dmac);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP7_SIZE;
+ ix += MLX5E_L2_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- dmac[0] = 0x01;
+ /* Flow Group for allmulti */
+ eth_zero_addr(mc_dmac);
+ mc_dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP8_SIZE;
+ ix += MLX5E_L2_GROUP3_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
+ kvfree(in);
return 0;
err_destroy_groups:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
+ kvfree(in);
return err;
}
-static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
+static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
- u32 *in;
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- int err;
-
- in = mlx5_vzalloc(inlen);
- if (!in)
- return -ENOMEM;
-
- err = __mlx5e_create_main_groups(ft, in, inlen);
-
- kvfree(in);
- return err;
+ mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}
-static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fts.main;
+ struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+ struct mlx5e_flow_table *ft = &l2_table->ft;
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
- if (!ft->g) {
- err = -ENOMEM;
- goto err_destroy_main_flow_table;
- }
- err = mlx5e_create_main_groups(ft);
+ err = mlx5e_create_l2_table_groups(l2_table);
if (err)
- goto err_free_g;
- return 0;
+ goto err_destroy_flow_table;
-err_free_g:
- kfree(ft->g);
+ return 0;
-err_destroy_main_flow_table:
+err_destroy_flow_table:
mlx5_destroy_flow_table(ft->t);
ft->t = NULL;
return err;
}
-static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
-{
- mlx5e_destroy_groups(ft);
- kfree(ft->g);
- mlx5_destroy_flow_table(ft->t);
- ft->t = NULL;
-}
-
-static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
-{
- mlx5e_destroy_flow_table(&priv->fts.main);
-}
-
#define MLX5E_NUM_VLAN_GROUPS 2
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE)
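/*
 * Here GROUP0 provides BIT(12) = 4096 slots, one per possible VID
 * (VLAN_N_VID), and GROUP1 provides the two catch-all slots used by the
 * untagged and any-VID rules.
 */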
-static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
- int inlen)
+static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
+ int inlen)
{
int err;
int ix = 0;
@@ -1128,7 +1002,7 @@ err_destroy_groups:
return err;
}
-static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
+static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
u32 *in;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1138,19 +1012,20 @@ static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
if (!in)
return -ENOMEM;
- err = __mlx5e_create_vlan_groups(ft, in, inlen);
+ err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
kvfree(in);
return err;
}
-static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fts.vlan;
+ struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -1160,65 +1035,90 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) {
err = -ENOMEM;
- goto err_destroy_vlan_flow_table;
+ goto err_destroy_vlan_table;
}
- err = mlx5e_create_vlan_groups(ft);
+ err = mlx5e_create_vlan_table_groups(ft);
if (err)
goto err_free_g;
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ goto err_destroy_vlan_flow_groups;
+
return 0;
+err_destroy_vlan_flow_groups:
+ mlx5e_destroy_groups(ft);
err_free_g:
kfree(ft->g);
-
-err_destroy_vlan_flow_table:
+err_destroy_vlan_table:
mlx5_destroy_flow_table(ft->t);
ft->t = NULL;
return err;
}
-static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
- mlx5e_destroy_flow_table(&priv->fts.vlan);
+ mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fts.ns)
+ if (!priv->fs.ns)
return -EINVAL;
- err = mlx5e_create_vlan_flow_table(priv);
- if (err)
- return err;
+ err = mlx5e_arfs_create_tables(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
+ err);
+ priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+ }
- err = mlx5e_create_main_flow_table(priv);
- if (err)
- goto err_destroy_vlan_flow_table;
+ err = mlx5e_create_ttc_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
+ err);
+ goto err_destroy_arfs_tables;
+ }
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- if (err)
- goto err_destroy_main_flow_table;
+ err = mlx5e_create_l2_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
+ err);
+ goto err_destroy_ttc_table;
+ }
+
+ err = mlx5e_create_vlan_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
+ err);
+ goto err_destroy_l2_table;
+ }
return 0;
-err_destroy_main_flow_table:
- mlx5e_destroy_main_flow_table(priv);
-err_destroy_vlan_flow_table:
- mlx5e_destroy_vlan_flow_table(priv);
+err_destroy_l2_table:
+ mlx5e_destroy_l2_table(priv);
+err_destroy_ttc_table:
+ mlx5e_destroy_ttc_table(priv);
+err_destroy_arfs_tables:
+ mlx5e_arfs_destroy_tables(priv);
return err;
}
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- mlx5e_destroy_main_flow_table(priv);
- mlx5e_destroy_vlan_flow_table(priv);
+ mlx5e_destroy_vlan_table(priv);
+ mlx5e_destroy_l2_table(priv);
+ mlx5e_destroy_ttc_table(priv);
+ mlx5e_arfs_destroy_tables(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 94fef7058..5a4d88c2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,6 +39,13 @@
#include "eswitch.h"
#include "vxlan.h"
+enum {
+ MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
+ MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
+ MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
+ MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
+};
+
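+/* With these values, the RQ/SQ flush loops poll at most
+ * MLX5_EN_QP_FLUSH_MAX_ITER = 5000 / 20 = 250 times before declaring a
+ * flush timeout.
+ */
+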
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
@@ -48,6 +55,7 @@ struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
u16 max_inline;
+ bool icosq;
};
struct mlx5e_cq_param {
@@ -59,8 +67,10 @@ struct mlx5e_cq_param {
struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
struct mlx5e_sq_param sq;
+ struct mlx5e_sq_param icosq;
struct mlx5e_cq_param rx_cq;
struct mlx5e_cq_param tx_cq;
+ struct mlx5e_cq_param icosq_cq;
};
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -71,10 +81,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
port_state = mlx5_query_vport_state(mdev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
- if (port_state == VPORT_STATE_UP)
+ if (port_state == VPORT_STATE_UP) {
+ netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
- else
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
+ }
}
static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -88,111 +101,85 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
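+/* Recover from a TX timeout by restarting the channels: close and re-open
+ * the device under rtnl_lock and priv->state_lock.
+ */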
+static void mlx5e_tx_timeout_work(struct work_struct *work)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_pport_stats *s = &priv->stats.pport;
- u32 *in;
- u32 *out;
- int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
-
- in = mlx5_vzalloc(sz);
- out = mlx5_vzalloc(sz);
- if (!in || !out)
- goto free_out;
-
- MLX5_SET(ppcnt_reg, in, local_port, 1);
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
- memcpy(s->IEEE_802_3_counters,
- MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
- sizeof(s->IEEE_802_3_counters));
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
- memcpy(s->RFC_2863_counters,
- MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
- sizeof(s->RFC_2863_counters));
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
- memcpy(s->RFC_2819_counters,
- MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
- sizeof(s->RFC_2819_counters));
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ tx_timeout_work);
+ int err;
-free_out:
- kvfree(in);
- kvfree(out);
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+ mlx5e_close_locked(priv->netdev);
+ err = mlx5e_open_locked(priv->netdev);
+ if (err)
+ netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ err);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
}
-void mlx5e_update_stats(struct mlx5e_priv *priv)
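+/* Fold the per-channel RQ/SQ software counters into priv->stats.sw. */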
+static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_vport_stats *s = &priv->stats.vport;
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
struct mlx5e_rq_stats *rq_stats;
struct mlx5e_sq_stats *sq_stats;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
- u32 *out;
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u64 tx_offload_none;
+ u64 tx_offload_none = 0;
int i, j;
- out = mlx5_vzalloc(outlen);
- if (!out)
- return;
-
- /* Collect firts the SW counters and then HW for consistency */
- s->rx_packets = 0;
- s->rx_bytes = 0;
- s->tx_packets = 0;
- s->tx_bytes = 0;
- s->tso_packets = 0;
- s->tso_bytes = 0;
- s->tso_inner_packets = 0;
- s->tso_inner_bytes = 0;
- s->tx_queue_stopped = 0;
- s->tx_queue_wake = 0;
- s->tx_queue_dropped = 0;
- s->tx_csum_inner = 0;
- tx_offload_none = 0;
- s->lro_packets = 0;
- s->lro_bytes = 0;
- s->rx_csum_none = 0;
- s->rx_csum_sw = 0;
- s->rx_wqe_err = 0;
+ memset(s, 0, sizeof(*s));
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
- s->lro_packets += rq_stats->lro_packets;
- s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_sw += rq_stats->csum_sw;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_wqe_err += rq_stats->wqe_err;
+ s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
+ s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+ s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+ s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
- s->tso_packets += sq_stats->tso_packets;
- s->tso_bytes += sq_stats->tso_bytes;
- s->tso_inner_packets += sq_stats->tso_inner_packets;
- s->tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
- s->tx_csum_inner += sq_stats->csum_offload_inner;
- tx_offload_none += sq_stats->csum_offload_none;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ tx_offload_none += sq_stats->csum_none;
}
}
- /* HW counters */
+ /* Update calculated offload counters */
+ s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+ s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
+
+ s->link_down_events_phy = MLX5_GET(ppcnt_reg,
+ priv->stats.pport.phy_counters,
+ counter_set.phys_layer_cntrs.link_down_events);
+}
+
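+/* Snapshot the device vport counters via the QUERY_VPORT_COUNTER command. */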
+static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
memset(in, 0, sizeof(in));
MLX5_SET(query_vport_counter_in, in, opcode,
@@ -202,56 +189,69 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
memset(out, 0, outlen);
- if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int prio;
+ void *out;
+ u32 *in;
+
+ in = mlx5_vzalloc(sz);
+ if (!in)
goto free_out;
-#define MLX5_GET_CTR(p, x) \
- MLX5_GET64(query_vport_counter_out, p, x)
-
- s->rx_error_packets =
- MLX5_GET_CTR(out, received_errors.packets);
- s->rx_error_bytes =
- MLX5_GET_CTR(out, received_errors.octets);
- s->tx_error_packets =
- MLX5_GET_CTR(out, transmit_errors.packets);
- s->tx_error_bytes =
- MLX5_GET_CTR(out, transmit_errors.octets);
-
- s->rx_unicast_packets =
- MLX5_GET_CTR(out, received_eth_unicast.packets);
- s->rx_unicast_bytes =
- MLX5_GET_CTR(out, received_eth_unicast.octets);
- s->tx_unicast_packets =
- MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
- s->tx_unicast_bytes =
- MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
-
- s->rx_multicast_packets =
- MLX5_GET_CTR(out, received_eth_multicast.packets);
- s->rx_multicast_bytes =
- MLX5_GET_CTR(out, received_eth_multicast.octets);
- s->tx_multicast_packets =
- MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
- s->tx_multicast_bytes =
- MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
-
- s->rx_broadcast_packets =
- MLX5_GET_CTR(out, received_eth_broadcast.packets);
- s->rx_broadcast_bytes =
- MLX5_GET_CTR(out, received_eth_broadcast.octets);
- s->tx_broadcast_packets =
- MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
- s->tx_broadcast_bytes =
- MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
- /* Update calculated offload counters */
- s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
- s->rx_csum_good = s->rx_packets - s->rx_csum_none -
- s->rx_csum_sw;
+ out = pstats->IEEE_802_3_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ out = pstats->RFC_2863_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ out = pstats->RFC_2819_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ out = pstats->phy_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ out = pstats->per_prio_counters[prio];
+ MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+ mlx5_core_access_reg(mdev, in, sz, out, sz,
+ MLX5_REG_PPCNT, 0, 0);
+ }
- mlx5e_update_pport_counters(priv);
free_out:
- kvfree(out);
+ kvfree(in);
+}
+
+static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+
+ if (!priv->q_counter)
+ return;
+
+ mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
+ &qcnt->rx_out_of_buffer);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_update_q_counter(priv);
+ mlx5e_update_vport_counters(priv);
+ mlx5e_update_pport_counters(priv);
+ mlx5e_update_sw_counters(priv);
}
static void mlx5e_update_stats_work(struct work_struct *work)
@@ -273,7 +273,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
{
struct mlx5e_priv *priv = vpriv;
- if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
switch (event) {
@@ -289,12 +289,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
- set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
- clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
@@ -309,6 +309,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ u32 byte_count;
int wq_sz;
int err;
int i;
@@ -323,32 +324,58 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
- rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!rq->skb) {
- err = -ENOMEM;
- goto err_rq_wq_destroy;
- }
- rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
- MLX5E_SW2HW_MTU(priv->netdev->mtu);
- rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->wqe_info) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
+ rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+
+ rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
+ rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
+ rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+ byte_count = rq->wqe_sz;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!rq->skb) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+ rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+
+ rq->wqe_sz = (priv->params.lro_en) ?
+ priv->params.lro_wqe_sz :
+ MLX5E_SW2HW_MTU(priv->netdev->mtu);
+ rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
+ byte_count = rq->wqe_sz;
+ byte_count |= MLX5_HW_START_PADDING;
+ }
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
- u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
- wqe->data.lkey = c->mkey_be;
- wqe->data.byte_count =
- cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+ wqe->data.byte_count = cpu_to_be32(byte_count);
}
+ rq->wq_type = priv->params.rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->tstamp = &priv->tstamp;
rq->channel = c;
rq->ix = c->ix;
rq->priv = c->priv;
+ rq->mkey_be = c->mkey_be;
+ rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
return 0;
@@ -360,7 +387,14 @@ err_rq_wq_destroy:
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
- kfree(rq->skb);
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ kfree(rq->wqe_info);
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ kfree(rq->skb);
+ }
+
mlx5_wq_destroy(&rq->wq_ctrl);
}
@@ -389,6 +423,7 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -403,7 +438,8 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
return err;
}
-static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
+ int next_state)
{
struct mlx5e_channel *c = rq->channel;
struct mlx5e_priv *priv = c->priv;
@@ -431,6 +467,36 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
return err;
}
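+/* Toggle VLAN Stripping Disable (VSD) on an RQ that is already in the RDY
+ * state, using MODIFY_RQ with the VSD bit selected in the modify bitmask.
+ */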
+static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+ MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+ MLX5_SET(rqc, rqc, vsd, vsd);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
@@ -457,6 +523,8 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
{
+ struct mlx5e_sq *sq = &c->icosq;
+ u16 pi = sq->pc & sq->wq.sz_m1;
int err;
err = mlx5e_create_rq(c, param, rq);
@@ -467,12 +535,15 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
- err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
goto err_disable_rq;
set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
- mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
+
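+	/* Post a NOP on the internal control SQ; its completion wakes napi,
+	 * which fills the new RQ via mlx5e_post_rx_wqes().
+	 */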
+ sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
+ sq->ico_wqe_info[pi].num_wqebbs = 1;
+ mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
return 0;
@@ -486,17 +557,25 @@ err_destroy_rq:
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
+ int tout = 0;
+ int err;
+
clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
- mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
- while (!mlx5_wq_ll_is_empty(&rq->wq))
- msleep(20);
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
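+	/* Drain the RQ, but give up after MLX5_EN_QP_FLUSH_TIMEOUT_MS and
+	 * let mlx5e_free_rx_descs() reclaim whatever is still posted.
+	 */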
+ while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
+ tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
+ msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+
+ if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
+ set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
/* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
napi_synchronize(&rq->channel->napi);
mlx5e_disable_rq(rq);
+ mlx5e_free_rx_descs(rq);
mlx5e_destroy_rq(rq);
}
@@ -538,10 +617,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
void *sqc = param->sqc;
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
- int txq_ix;
int err;
- err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+ err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;
@@ -566,8 +644,24 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- txq_ix = c->ix + tc * priv->params.num_channels;
- sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+ if (param->icosq) {
+ u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
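+		/* Internal control SQ: not mapped to a netdev TX queue; track
+		 * each posted WQE in ico_wqe_info so completions can be
+		 * decoded.
+		 */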
+ sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
+ wq_sz,
+ GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!sq->ico_wqe_info) {
+ err = -ENOMEM;
+ goto err_free_sq_db;
+ }
+ } else {
+ int txq_ix;
+
+ txq_ix = c->ix + tc * priv->params.num_channels;
+ sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+ priv->txq_to_sq_map[txq_ix] = sq;
+ }
sq->pdev = c->pdev;
sq->tstamp = &priv->tstamp;
@@ -576,10 +670,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->tc = tc;
sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
sq->bf_budget = MLX5E_SQ_BF_BUDGET;
- priv->txq_to_sq_map[txq_ix] = sq;
return 0;
+err_free_sq_db:
+ mlx5e_free_sq_db(sq);
+
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
@@ -594,6 +690,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
+ kfree(sq->ico_wqe_info);
mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
mlx5_unmap_free_uar(priv->mdev, &sq->uar);
@@ -622,10 +719,10 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
- MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
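+	/* An internal control SQ is not bound to any TIS (tis_lst_sz = 0). */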
+ MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
- MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
@@ -700,9 +797,11 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
if (err)
goto err_disable_sq;
- set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
- netdev_tx_reset_queue(sq->txq);
- netif_tx_start_queue(sq->txq);
+ if (sq->txq) {
+ set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+ }
return 0;
@@ -723,21 +822,37 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
- clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
- napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
- netif_tx_disable_queue(sq->txq);
+ int tout = 0;
+ int err;
- /* ensure hw is notified of all pending wqes */
- if (mlx5e_sq_has_room_for(sq, 1))
- mlx5e_send_nop(sq, true);
+ if (sq->txq) {
+ clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ /* prevent netif_tx_wake_queue */
+ napi_synchronize(&sq->channel->napi);
+ netif_tx_disable_queue(sq->txq);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
- while (sq->cc != sq->pc) /* wait till sq is empty */
- msleep(20);
+ /* ensure hw is notified of all pending wqes */
+ if (mlx5e_sq_has_room_for(sq, 1))
+ mlx5e_send_nop(sq, true);
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_ERR);
+ if (err)
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ }
+
+ /* wait till sq is empty, unless a TX timeout occurred on this SQ */
+ while (sq->cc != sq->pc &&
+ !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
+ msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
+ if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ }
/* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
napi_synchronize(&sq->channel->napi);
+ mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
mlx5e_destroy_sq(sq);
}
@@ -985,10 +1100,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_tx_cqs(c, cparam);
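+	/* Open the internal control SQ CQ first, with no interrupt
+	 * moderation (0 usec / 0 packets).
+	 */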
+ err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0);
if (err)
goto err_napi_del;
+ err = mlx5e_open_tx_cqs(c, cparam);
+ if (err)
+ goto err_close_icosq_cq;
+
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
priv->params.rx_cq_moderation_usec,
priv->params.rx_cq_moderation_pkts);
@@ -997,10 +1116,14 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
napi_enable(&c->napi);
- err = mlx5e_open_sqs(c, cparam);
+ err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
if (err)
goto err_disable_napi;
+ err = mlx5e_open_sqs(c, cparam);
+ if (err)
+ goto err_close_icosq;
+
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
goto err_close_sqs;
@@ -1013,6 +1136,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
err_close_sqs:
mlx5e_close_sqs(c);
+err_close_icosq:
+ mlx5e_close_sq(&c->icosq);
+
err_disable_napi:
napi_disable(&c->napi);
mlx5e_close_cq(&c->rq.cq);
@@ -1020,6 +1146,9 @@ err_disable_napi:
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
+err_close_icosq_cq:
+ mlx5e_close_cq(&c->icosq.cq);
+
err_napi_del:
netif_napi_del(&c->napi);
napi_hash_del(&c->napi);
@@ -1032,9 +1161,11 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
{
mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
+ mlx5e_close_sq(&c->icosq);
napi_disable(&c->napi);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_tx_cqs(c);
+ mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
napi_hash_del(&c->napi);
@@ -1049,11 +1180,23 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
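+		/* These fields are offsets from the HW minimums: 2^9 strides
+		 * per WQE and a 2^6 byte stride.
+		 */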
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ priv->params.mpwqe_log_num_strides - 9);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ priv->params.mpwqe_log_stride_sz - 6);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ }
+
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
MLX5_SET(wq, wq, pd, priv->pdn);
+ MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
@@ -1068,17 +1211,27 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}
-static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
- struct mlx5e_sq_param *param)
+static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
- MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->pdn);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+
param->max_inline = priv->params.tx_max_inline;
}
@@ -1094,8 +1247,22 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
+ u8 log_cq_size;
- MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ log_cq_size = priv->params.log_rq_size +
+ priv->params.mpwqe_log_num_strides;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ log_cq_size = priv->params.log_rq_size;
+ }
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
+ if (priv->params.rx_cqe_compress) {
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+ }
mlx5e_build_common_cq_param(priv, param);
}
@@ -1105,25 +1272,52 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
mlx5e_build_common_cq_param(priv, param);
}
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
- struct mlx5e_channel_param *cparam)
+static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param,
+ u8 log_wq_size)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param,
+ u8 log_wq_size)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+
+ MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
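+	/* Allow UMR WQEs on this SQ; striding RQ uses them to map its RX
+	 * buffers.
+	 */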
+ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+
+ param->icosq = true;
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+				      struct mlx5e_channel_param *cparam)
{
- memset(cparam, 0, sizeof(*cparam));
+ u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
mlx5e_build_rq_param(priv, &cparam->rq);
mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+ mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
- struct mlx5e_channel_param cparam;
+ struct mlx5e_channel_param *cparam;
int nch = priv->params.num_channels;
int err = -ENOMEM;
int i;
@@ -1135,12 +1329,15 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
sizeof(struct mlx5e_sq *), GFP_KERNEL);
- if (!priv->channel || !priv->txq_to_sq_map)
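+	/* mlx5e_channel_param is too large to keep on the stack. */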
+ cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+
+ if (!priv->channel || !priv->txq_to_sq_map || !cparam)
goto err_free_txq_to_sq_map;
- mlx5e_build_channel_param(priv, &cparam);
+ mlx5e_build_channel_param(priv, cparam);
+
for (i = 0; i < nch; i++) {
- err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+ err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
if (err)
goto err_close_channels;
}
@@ -1151,6 +1348,12 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
goto err_close_channels;
}
+	/* FIXME: This is a workaround for the TX timeout watchdog false alarm
+	 * triggered when polling inactive TX queues.
+	 */
+ netif_tx_start_all_queues(priv->netdev);
+
+ kfree(cparam);
return 0;
err_close_channels:
@@ -1160,6 +1363,7 @@ err_close_channels:
err_free_txq_to_sq_map:
kfree(priv->txq_to_sq_map);
kfree(priv->channel);
+ kfree(cparam);
return err;
}
@@ -1168,6 +1372,12 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
int i;
+	/* FIXME: This is a workaround only for the TX timeout watchdog false
+	 * alarm triggered when polling inactive TX queues.
+	 */
+ netif_tx_stop_all_queues(priv->netdev);
+ netif_tx_disable(priv->netdev);
+
for (i = 0; i < priv->params.num_channels; i++)
mlx5e_close_channel(priv->channel[i]);
@@ -1199,48 +1409,36 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
int ix = i;
+ u32 rqn;
if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
ix = priv->params.indirection_rqt[ix];
- MLX5_SET(rqtc, rqtc, rq_num[i],
- test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[ix]->rq.rqn :
- priv->drop_rq.rqn);
+ rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn;
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
}
}
-static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
- enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
+ int ix)
{
+ u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn;
- switch (rqt_ix) {
- case MLX5E_INDIRECTION_RQT:
- mlx5e_fill_indir_rqt_rqns(priv, rqtc);
-
- break;
-
- default: /* MLX5E_SINGLE_RQ_RQT */
- MLX5_SET(rqtc, rqtc, rq_num[0],
- test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[0]->rq.rqn :
- priv->drop_rq.rqn);
-
- break;
- }
+ MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, int ix, u32 *rqtn)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
void *rqtc;
int inlen;
- int sz;
int err;
-
- sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
+ u32 *in;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
@@ -1252,26 +1450,73 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+ if (sz > 1) /* RSS */
+ mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+ else
+ mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
- err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
+ err = mlx5_core_create_rqt(mdev, in, inlen, rqtn);
kvfree(in);
+ return err;
+}
+
+static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, u32 rqtn)
+{
+ mlx5_core_destroy_rqt(priv->mdev, rqtn);
+}
+
+static int mlx5e_create_rqts(struct mlx5e_priv *priv)
+{
+ int nch = mlx5e_get_max_num_channels(priv->mdev);
+ u32 *rqtn;
+ int err;
+ int ix;
+
+ /* Indirect RQT */
+ rqtn = &priv->indir_rqtn;
+ err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqtn);
+ if (err)
+ return err;
+
+ /* Direct RQTs */
+ for (ix = 0; ix < nch; ix++) {
+ rqtn = &priv->direct_tir[ix].rqtn;
+		err = mlx5e_create_rqt(priv, 1 /* size */, ix, rqtn);
+ if (err)
+ goto err_destroy_rqts;
+ }
+
+ return 0;
+
+err_destroy_rqts:
+ for (ix--; ix >= 0; ix--)
+ mlx5e_destroy_rqt(priv, priv->direct_tir[ix].rqtn);
+
+ mlx5e_destroy_rqt(priv, priv->indir_rqtn);
return err;
}
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_destroy_rqts(struct mlx5e_priv *priv)
+{
+ int nch = mlx5e_get_max_num_channels(priv->mdev);
+ int i;
+
+ for (i = 0; i < nch; i++)
+ mlx5e_destroy_rqt(priv, priv->direct_tir[i].rqtn);
+
+ mlx5e_destroy_rqt(priv, priv->indir_rqtn);
+}
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
void *rqtc;
int inlen;
- int sz;
+ u32 *in;
int err;
- sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
-
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
if (!in)
@@ -1280,27 +1525,31 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
-
- mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+ if (sz > 1) /* RSS */
+ mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+ else
+ mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
- err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
+ err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
kvfree(in);
return err;
}
-static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
-{
- mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
-}
-
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
- mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
- mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ u32 rqtn;
+ int ix;
+
+ rqtn = priv->indir_rqtn;
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+ for (ix = 0; ix < priv->params.num_channels; ix++) {
+ rqtn = priv->direct_tir[ix].rqtn;
+ mlx5e_redirect_rqt(priv, rqtn, 1, ix);
+ }
}
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
@@ -1345,6 +1594,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
int inlen;
int err;
int tt;
+ int ix;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -1356,23 +1606,32 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
mlx5e_build_tir_ctx_lro(tirc, priv);
- for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
- err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ err = mlx5_core_modify_tir(mdev, priv->indir_tirn[tt], in,
+ inlen);
if (err)
- break;
+ goto free_in;
}
+ for (ix = 0; ix < mlx5e_get_max_num_channels(mdev); ix++) {
+ err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
+ in, inlen);
+ if (err)
+ goto free_in;
+ }
+
+free_in:
kvfree(in);
return err;
}
-static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
- u32 tirn)
+static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
{
void *in;
int inlen;
int err;
+ int i;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -1381,25 +1640,23 @@ static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
- err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
-
- kvfree(in);
-
- return err;
-}
-
-static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
-{
- int err;
- int i;
+ for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
+ err = mlx5_core_modify_tir(priv->mdev, priv->indir_tirn[i], in,
+ inlen);
+		if (err)
+			goto free_in;
+	}
-	for (i = 0; i < MLX5E_NUM_TT; i++) {
-		err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
-							     priv->tirn[i]);
+	for (i = 0; i < priv->params.num_channels; i++) {
+		err = mlx5_core_modify_tir(priv->mdev,
+					   priv->direct_tir[i].tirn, in,
+					   inlen);
-		if (err)
-			return err;
-	}
+		if (err)
+			goto free_in;
+	}
+
+	err = 0;
+free_in:
+	/* don't leak 'in' when a modify_tir command fails */
+	kvfree(in);
-	return 0;
+	return err;
}
@@ -1464,8 +1721,11 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_num_tc(netdev, ntc);
+ /* Map netdev TCs to offset 0
+ * We have our own UP to TXQ mapping for QoS
+ */
for (tc = 0; tc < ntc; tc++)
- netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+ netdev_set_tc_queue(netdev, tc, nch, 0);
}
int mlx5e_open_locked(struct net_device *netdev)
@@ -1503,6 +1763,9 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_redirect_rqts(priv);
mlx5e_update_carrier(priv);
mlx5e_timestamp_init(priv);
+#ifdef CONFIG_RFS_ACCEL
+ priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
+#endif
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -1710,7 +1973,8 @@ static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
mlx5e_destroy_tis(priv, tc);
}
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+ enum mlx5e_traffic_types tt)
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
@@ -1731,19 +1995,8 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
-
- switch (tt) {
- case MLX5E_TT_ANY:
- MLX5_SET(tirc, tirc, indirect_table,
- priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
- MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
- break;
- default:
- MLX5_SET(tirc, tirc, indirect_table,
- priv->rqtn[MLX5E_INDIRECTION_RQT]);
- mlx5e_build_tir_ctx_hash(tirc, priv);
- break;
- }
+ MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqtn);
+ mlx5e_build_tir_ctx_hash(tirc, priv);
switch (tt) {
case MLX5E_TT_IPV4_TCP:
@@ -1823,64 +2076,107 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP);
break;
+ default:
+ WARN_ONCE(true,
+ "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
}
}
-static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+ u32 rqtn)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
+ MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+
+ mlx5e_build_tir_ctx_lro(tirc, priv);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, rqtn);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+}
+
+static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+{
+ int nch = mlx5e_get_max_num_channels(priv->mdev);
void *tirc;
int inlen;
+ u32 *tirn;
int err;
+ u32 *in;
+ int ix;
+ int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ /* indirect tirs */
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(in, 0, inlen);
+ tirn = &priv->indir_tirn[tt];
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ mlx5e_build_indir_tir_ctx(priv, tirc, tt);
+ err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+ if (err)
+ goto err_destroy_tirs;
+ }
- mlx5e_build_tir_ctx(priv, tirc, tt);
+ /* direct tirs */
+ for (ix = 0; ix < nch; ix++) {
+ memset(in, 0, inlen);
+ tirn = &priv->direct_tir[ix].tirn;
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ mlx5e_build_direct_tir_ctx(priv, tirc,
+ priv->direct_tir[ix].rqtn);
+ err = mlx5_core_create_tir(priv->mdev, in, inlen, tirn);
+ if (err)
+ goto err_destroy_ch_tirs;
+ }
- err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+ kvfree(in);
+
+ return 0;
+
+err_destroy_ch_tirs:
+ for (ix--; ix >= 0; ix--)
+ mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[ix].tirn);
+
+err_destroy_tirs:
+ for (tt--; tt >= 0; tt--)
+ mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[tt]);
kvfree(in);
return err;
}
-static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
{
- mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
+ int nch = mlx5e_get_max_num_channels(priv->mdev);
+ int i;
+
+ for (i = 0; i < nch; i++)
+ mlx5_core_destroy_tir(priv->mdev, priv->direct_tir[i].tirn);
+
+ for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+ mlx5_core_destroy_tir(priv->mdev, priv->indir_tirn[i]);
}
-static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
{
- int err;
+ int err = 0;
int i;
- for (i = 0; i < MLX5E_NUM_TT; i++) {
- err = mlx5e_create_tir(priv, i);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ for (i = 0; i < priv->params.num_channels; i++) {
+ err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
if (err)
- goto err_destroy_tirs;
+ return err;
}
return 0;
-
-err_destroy_tirs:
- for (i--; i >= 0; i--)
- mlx5e_destroy_tir(priv, i);
-
- return err;
-}
-
-static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
-{
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++)
- mlx5e_destroy_tir(priv, i);
}
static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
@@ -1923,6 +2219,8 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
return mlx5e_configure_flower(priv, proto, tc->cls_flower);
case TC_CLSFLOWER_DESTROY:
return mlx5e_delete_flower(priv, tc->cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return mlx5e_stats_flower(priv, tc->cls_flower);
}
default:
return -EOPNOTSUPP;
@@ -1939,19 +2237,37 @@ static struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sw_stats *sstats = &priv->stats.sw;
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
-
- stats->rx_packets = vstats->rx_packets;
- stats->rx_bytes = vstats->rx_bytes;
- stats->tx_packets = vstats->tx_packets;
- stats->tx_bytes = vstats->tx_bytes;
- stats->multicast = vstats->rx_multicast_packets +
- vstats->tx_multicast_packets;
- stats->tx_errors = vstats->tx_error_packets;
- stats->rx_errors = vstats->rx_error_packets;
- stats->tx_dropped = vstats->tx_queue_dropped;
- stats->rx_crc_errors = 0;
- stats->rx_length_errors = 0;
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+
+ stats->rx_packets = sstats->rx_packets;
+ stats->rx_bytes = sstats->rx_bytes;
+ stats->tx_packets = sstats->tx_packets;
+ stats->tx_bytes = sstats->tx_bytes;
+
+ stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+ stats->tx_dropped = sstats->tx_queue_dropped;
+
+ stats->rx_length_errors =
+ PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+ PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
+ PPORT_802_3_GET(pstats, a_frame_too_long_errors);
+ stats->rx_crc_errors =
+ PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
+ stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
+ stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
+ stats->tx_carrier_errors =
+ PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors;
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
+
+ /* vport multicast also counts packets that are dropped due to steering
+ * or rx out of buffer
+ */
+ stats->multicast =
+ VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
return stats;
}
@@ -1980,50 +2296,154 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return 0;
}
-static int mlx5e_set_features(struct net_device *netdev,
- netdev_features_t features)
+#define MLX5E_SET_FEATURE(netdev, feature, enable) \
+ do { \
+ if (enable) \
+ netdev->features |= feature; \
+ else \
+ netdev->features &= ~feature; \
+ } while (0)
+
+typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
+
+static int set_feature_lro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int err = 0;
- netdev_features_t changes = features ^ netdev->features;
+ bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ int err;
mutex_lock(&priv->state_lock);
- if (changes & NETIF_F_LRO) {
- bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
-
- priv->params.lro_en = !!(features & NETIF_F_LRO);
- err = mlx5e_modify_tirs_lro(priv);
- if (err)
- mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
- err);
+ if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+ mlx5e_close_locked(priv->netdev);
- if (was_opened)
- err = mlx5e_open_locked(priv->netdev);
+ priv->params.lro_en = enable;
+ err = mlx5e_modify_tirs_lro(priv);
+ if (err) {
+ netdev_err(netdev, "lro modify failed, %d\n", err);
+ priv->params.lro_en = !enable;
}
+ if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+ mlx5e_open_locked(priv->netdev);
+
mutex_unlock(&priv->state_lock);
- if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
- if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
- mlx5e_enable_vlan_filter(priv);
- else
- mlx5e_disable_vlan_filter(priv);
- }
+ return err;
+}
- if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) &&
- mlx5e_tc_num_filters(priv)) {
+static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (enable)
+ mlx5e_enable_vlan_filter(priv);
+ else
+ mlx5e_disable_vlan_filter(priv);
+
+ return 0;
+}
+
+static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (!enable && mlx5e_tc_num_filters(priv)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
+ return 0;
+}
+
+static int set_feature_rx_all(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_set_port_fcs(mdev, !enable);
+}
+
+static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ priv->params.vlan_strip_disable = !enable;
+ err = mlx5e_modify_rqs_vsd(priv, !enable);
+ if (err)
+ priv->params.vlan_strip_disable = enable;
+
+ mutex_unlock(&priv->state_lock);
+
return err;
}
+#ifdef CONFIG_RFS_ACCEL
+static int set_feature_arfs(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ if (enable)
+ err = mlx5e_arfs_enable(priv);
+ else
+ err = mlx5e_arfs_disable(priv);
+
+ return err;
+}
+#endif
+
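+/* Apply a single feature change: call the handler only when the bit
+ * actually flips, and update netdev->features on success.
+ */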
+static int mlx5e_handle_feature(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t feature,
+ mlx5e_feature_handler feature_handler)
+{
+ netdev_features_t changes = wanted_features ^ netdev->features;
+ bool enable = !!(wanted_features & feature);
+ int err;
+
+ if (!(changes & feature))
+ return 0;
+
+ err = feature_handler(netdev, enable);
+ if (err) {
+ netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
+ enable ? "Enable" : "Disable", feature, err);
+ return err;
+ }
+
+ MLX5E_SET_FEATURE(netdev, feature, enable);
+ return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ int err;
+
+ err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
+ set_feature_lro);
+ err |= mlx5e_handle_feature(netdev, features,
+ NETIF_F_HW_VLAN_CTAG_FILTER,
+ set_feature_vlan_filter);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
+ set_feature_tc_num_filters);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
+ set_feature_rx_all);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
+ set_feature_rx_vlan);
+#ifdef CONFIG_RFS_ACCEL
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
+ set_feature_arfs);
+#endif
+
+ return err ? -EINVAL : 0;
+}
+
#define MXL5_HW_MIN_MTU 64
#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
@@ -2093,6 +2513,21 @@ static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
vlan, qos);
}
+static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
+}
+
+static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
+}
static int mlx5_vport_link2ifla(u8 esw_link)
{
switch (esw_link) {
@@ -2149,7 +2584,6 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
vf_stats);
}
-#if IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN)
static void mlx5e_add_vxlan_port(struct net_device *netdev,
sa_family_t sa_family, __be16 port)
{
@@ -2221,7 +2655,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}
-#endif
+
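+/* ndo_tx_timeout callback: log the stalled SQs and kick the recovery
+ * worker.
+ */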
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ bool sched_work = false;
+ int i;
+
+ netdev_err(dev, "TX timeout detected\n");
+
+ for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+
+ if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+ continue;
+ sched_work = true;
+ set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
+ i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+ }
+
+ if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+ schedule_work(&priv->tx_timeout_work);
+}
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
@@ -2237,6 +2693,10 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
+#endif
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2253,16 +2713,20 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
-#ifdef CONFIG_MLX5_CORE_EN_VXLAN
.ndo_add_vxlan_port = mlx5e_add_vxlan_port,
.ndo_del_vxlan_port = mlx5e_del_vxlan_port,
.ndo_features_check = mlx5e_features_check,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
+ .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
+ .ndo_set_vf_trust = mlx5e_set_vf_trust,
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2317,25 +2781,121 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
}
#endif
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+ u32 *indirection_rqt, int len,
int num_channels)
{
+ int node = mdev->priv.numa_node;
+ int node_num_of_cores;
int i;
+ if (node == -1)
+ node = first_online_node;
+
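+	/* By default, spread RSS only across channels whose CPUs are local
+	 * to the device's NUMA node.
+	 */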
+ node_num_of_cores = cpumask_weight(cpumask_of_node(node));
+
+ if (node_num_of_cores)
+ num_channels = min_t(int, num_channels, node_num_of_cores);
+
for (i = 0; i < len; i++)
indirection_rqt[i] = i % num_channels;
}
+static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, striding_rq) &&
+ MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
+}
+
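+/* Report the minimum PCIe link bandwidth in Mbps (approximate per-lane
+ * rate times the link width).
+ */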
+static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
+{
+ enum pcie_link_width width;
+ enum pci_bus_speed speed;
+ int err = 0;
+
+ err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
+ if (err)
+ return err;
+
+ if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+ return -EINVAL;
+
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ *pci_bw = 2500 * width;
+ break;
+ case PCIE_SPEED_5_0GT:
+ *pci_bw = 5000 * width;
+ break;
+ case PCIE_SPEED_8_0GT:
+ *pci_bw = 8000 * width;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
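+/* Compress CQEs when PCIe looks like the bottleneck: measured bandwidth
+ * below the port speed and under 40 Gbps.
+ */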
+static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
+{
+ return (link_speed && pci_bw &&
+ (pci_bw < 40000) && (pci_bw < link_speed));
+}
+
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
struct net_device *netdev,
int num_channels)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ u32 link_speed = 0;
+ u32 pci_bw = 0;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- priv->params.log_rq_size =
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
+
+ /* set CQE compression */
+ priv->params.rx_cqe_compress_admin = false;
+ if (MLX5_CAP_GEN(mdev, cqe_compression) &&
+ MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ mlx5e_get_max_linkspeed(mdev, &link_speed);
+ mlx5e_get_pci_bw(mdev, &pci_bw);
+ mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
+ link_speed, pci_bw);
+ priv->params.rx_cqe_compress_admin =
+ cqe_compress_heuristic(link_speed, pci_bw);
+ }
+
+ priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
+
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+ priv->params.mpwqe_log_stride_sz =
+ priv->params.rx_cqe_compress ?
+ MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
+ MLX5_MPWRQ_LOG_STRIDE_SIZE;
+ priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+ priv->params.mpwqe_log_stride_sz;
+ priv->params.lro_en = true;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ }
+
+ mlx5_core_info(mdev,
+ "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ BIT(priv->params.log_rq_size),
+ BIT(priv->params.mpwqe_log_stride_sz),
+ priv->params.rx_cqe_compress_admin);
+
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
priv->params.rx_cq_moderation_usec =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
priv->params.rx_cq_moderation_pkts =
@@ -2345,15 +2905,13 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.tx_cq_moderation_pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- priv->params.min_rx_wqes =
- MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
priv->params.num_tc = 1;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
netdev_rss_key_fill(priv->params.toeplitz_hash_key,
sizeof(priv->params.toeplitz_hash_key));
- mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, num_channels);
priv->params.lro_wqe_sz =
@@ -2371,6 +2929,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
@@ -2390,6 +2949,8 @@ static void mlx5e_build_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ bool fcs_supported;
+ bool fcs_enabled;
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
@@ -2424,25 +2985,41 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (mlx5e_vxlan_allowed(mdev)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
- netdev->hw_enc_features |= NETIF_F_RXCSUM;
+ netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6;
- netdev->hw_enc_features |= NETIF_F_RXHASH;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
+ mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
+
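+ /* NETIF_F_RXALL (deliver frames that fail the FCS check) is
+ * advertised only when the device supports it, and starts off
+ * while FCS checking is currently enabled.
+ */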
+ if (fcs_supported)
+ netdev->hw_features |= NETIF_F_RXALL;
+
netdev->features = netdev->hw_features;
if (!priv->params.lro_en)
netdev->features &= ~NETIF_F_LRO;
+ if (fcs_enabled)
+ netdev->features &= ~NETIF_F_RXALL;
+
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
if (FT_CAP(flow_modify_en) &&
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
- FT_CAP(flow_table_modify))
- priv->netdev->hw_features |= NETIF_F_HW_TC;
+ FT_CAP(flow_table_modify)) {
+ netdev->hw_features |= NETIF_F_HW_TC;
+#ifdef CONFIG_RFS_ACCEL
+ netdev->hw_features |= NETIF_F_NTUPLE;
+#endif
+ }
netdev->features |= NETIF_F_HIGHDMA;
@@ -2476,6 +3053,61 @@ static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
return err;
}
+static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
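+ /* Failure is non-fatal: with q_counter == 0, the rx_out_of_buffer
+ * statistic is simply not collected.
+ */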
+ err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
+ if (err) {
+ mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
+ priv->q_counter = 0;
+ }
+}
+
+static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
+{
+ if (!priv->q_counter)
+ return;
+
+ mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+}
+
+static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_create_mkey_mbox_in *in;
+ struct mlx5_mkey_seg *mkc;
+ int inlen = sizeof(*in);
+ u64 npages =
+ mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+ int err;
+
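+ /* One UMR-enabled mkey sized to cover the MTT entries of all
+ * MPWQEs across the maximum number of channels; fragmented MPWQEs
+ * later remap their slice of it via UMR WQEs.
+ */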
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = &in->seg;
+ mkc->status = MLX5_MKEY_STATUS_FREE;
+ mkc->flags = MLX5_PERM_UMR_EN |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_PERM_LOCAL_WRITE |
+ MLX5_ACCESS_MODE_MTT;
+
+ mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ mkc->flags_pd = cpu_to_be32(priv->pdn);
+ mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
+ mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
+ mkc->log2_page_size = PAGE_SHIFT;
+
+ err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
+ NULL, NULL);
+
+ kvfree(in);
+
+ return err;
+}
+
static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
{
struct net_device *netdev;
@@ -2529,10 +3161,16 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_dealloc_transport_domain;
}
+ err = mlx5e_create_umr_mkey(priv);
+ if (err) {
+ mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
+ goto err_destroy_mkey;
+ }
+
err = mlx5e_create_tises(priv);
if (err) {
mlx5_core_warn(mdev, "create tises failed, %d\n", err);
- goto err_destroy_mkey;
+ goto err_destroy_umr_mkey;
}
err = mlx5e_open_drop_rq(priv);
@@ -2541,37 +3179,33 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_destroy_tises;
}
- err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
+ err = mlx5e_create_rqts(priv);
if (err) {
- mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
+ mlx5_core_warn(mdev, "create rqts failed, %d\n", err);
goto err_close_drop_rq;
}
- err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
- if (err) {
- mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
- goto err_destroy_rqt_indir;
- }
-
err = mlx5e_create_tirs(priv);
if (err) {
mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
- goto err_destroy_rqt_single;
+ goto err_destroy_rqts;
}
- err = mlx5e_create_flow_tables(priv);
+ err = mlx5e_create_flow_steering(priv);
if (err) {
- mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
+ mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
goto err_destroy_tirs;
}
- mlx5e_init_eth_addr(priv);
+ mlx5e_create_q_counter(priv);
+
+ mlx5e_init_l2_addr(priv);
mlx5e_vxlan_init(priv);
err = mlx5e_tc_init(priv);
if (err)
- goto err_destroy_flow_tables;
+ goto err_dealloc_q_counters;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
@@ -2583,8 +3217,11 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
goto err_tc_cleanup;
}
- if (mlx5e_vxlan_allowed(mdev))
+ if (mlx5e_vxlan_allowed(mdev)) {
+ rtnl_lock();
vxlan_get_rx_port(netdev);
+ rtnl_unlock();
+ }
mlx5e_enable_async_events(priv);
queue_work(priv->wq, &priv->set_rx_mode_work);
@@ -2594,17 +3231,15 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
err_tc_cleanup:
mlx5e_tc_cleanup(priv);
-err_destroy_flow_tables:
- mlx5e_destroy_flow_tables(priv);
+err_dealloc_q_counters:
+ mlx5e_destroy_q_counter(priv);
+ mlx5e_destroy_flow_steering(priv);
err_destroy_tirs:
mlx5e_destroy_tirs(priv);
-err_destroy_rqt_single:
- mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-
-err_destroy_rqt_indir:
- mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+err_destroy_rqts:
+ mlx5e_destroy_rqts(priv);
err_close_drop_rq:
mlx5e_close_drop_rq(priv);
@@ -2612,6 +3247,9 @@ err_close_drop_rq:
err_destroy_tises:
mlx5e_destroy_tises(priv);
+err_destroy_umr_mkey:
+ mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
+
err_destroy_mkey:
mlx5_core_destroy_mkey(mdev, &priv->mkey);
@@ -2645,22 +3283,20 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
flush_workqueue(priv->wq);
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
netif_device_detach(netdev);
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_close_locked(netdev);
- mutex_unlock(&priv->state_lock);
+ mlx5e_close(netdev);
} else {
unregister_netdev(netdev);
}
mlx5e_tc_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
- mlx5e_destroy_flow_tables(priv);
+ mlx5e_destroy_q_counter(priv);
+ mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_tirs(priv);
- mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
- mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_destroy_rqts(priv);
mlx5e_close_drop_rq(priv);
mlx5e_destroy_tises(priv);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 58d4e2f96..9f2a16a50 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -42,13 +42,149 @@ static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
}
-static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
- struct mlx5e_rx_wqe *wqe, u16 ix)
+static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
+ void *data)
+{
+ u32 ci = cqcc & cq->wq.sz_m1;
+
+ memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
+}
+
+static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+{
+ mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
+ cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt);
+ cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
+ rq->stats.cqe_compress_blks++;
+}
+
+static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
+{
+ mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
+ cq->mini_arr_idx = 0;
+}
+
+static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+{
+ u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
+ u32 wq_sz = 1 << cq->wq.log_sz;
+ u32 ci = cqcc & cq->wq.sz_m1;
+ u32 ci_top = min_t(u32, wq_sz, ci + n);
+
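+ /* The ownership bit alternates on every pass over the CQ ring,
+ * so entries past the wrap point get the inverted value.
+ */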
+ for (; ci < ci_top; ci++, n--) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+ cqe->op_own = op_own;
+ }
+
+ if (unlikely(ci == wq_sz)) {
+ op_own = !op_own;
+ for (ci = 0; ci < n; ci++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+ cqe->op_own = op_own;
+ }
+ }
+}
+
+static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+{
+ u16 wqe_cnt_step;
+
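+ /* Expand one mini CQE into the title CQE: byte count and checksum
+ * come from the mini array, the ownership bit is recomputed from
+ * cqcc, and the wqe counter advances by the strides just consumed.
+ */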
+ cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
+ cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
+ cq->title.op_own &= 0xf0;
+ cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
+ cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
+
+ wqe_cnt_step =
+ rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
+ cq->decmprs_wqe_counter =
+ (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
+}
+
+static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+{
+ mlx5e_decompress_cqe(rq, cq, cqcc);
+ cq->title.rss_hash_type = 0;
+ cq->title.rss_hash_result = 0;
+}
+
+static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq,
+ int update_owner_only,
+ int budget_rem)
+{
+ u32 cqcc = cq->wq.cc + update_owner_only;
+ u32 cqe_count;
+ u32 i;
+
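+ /* Process at most budget_rem of the remaining compressed entries;
+ * leftovers (decmprs_left) are picked up on the next poll.
+ */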
+ cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
+
+ for (i = update_owner_only; i < cqe_count;
+ i++, cq->mini_arr_idx++, cqcc++) {
+ if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
+ mlx5e_read_mini_arr_slot(cq, cqcc);
+
+ mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
+ rq->handle_rx_cqe(rq, &cq->title);
+ }
+ mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
+ cq->wq.cc = cqcc;
+ cq->decmprs_left -= cqe_count;
+ rq->stats.cqe_compress_pkts += cqe_count;
+
+ return cqe_count;
+}
+
+static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq,
+ int budget_rem)
+{
+ mlx5e_read_title_slot(rq, cq, cq->wq.cc);
+ mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
+ mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
+ rq->handle_rx_cqe(rq, &cq->title);
+ cq->mini_arr_idx++;
+
+ return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
+}
+
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
+{
+ bool was_opened;
+
+ if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
+ return;
+
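+ /* The stride layout depends on the compression setting, so the
+ * channels are closed and reopened around the update.
+ */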
+ mutex_lock(&priv->state_lock);
+
+ if (priv->params.rx_cqe_compress == val)
+ goto unlock;
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params.rx_cqe_compress = val;
+
+ if (was_opened)
+ mlx5e_open_locked(priv->netdev);
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+}
+
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
struct sk_buff *skb;
dma_addr_t dma_addr;
- skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+ skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
if (unlikely(!skb))
return -ENOMEM;
@@ -62,10 +198,9 @@ static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
goto err_free_skb;
- skb_reserve(skb, MLX5E_NET_IP_ALIGN);
-
*((dma_addr_t *)skb->cb) = dma_addr;
- wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+ wqe->data.addr = cpu_to_be64(dma_addr);
+ wqe->data.lkey = rq->mkey_be;
rq->skb[ix] = skb;
@@ -77,18 +212,427 @@ err_free_skb:
return -ENOMEM;
}
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct sk_buff *skb = rq->skb[ix];
+
+ if (skb) {
+ rq->skb[ix] = NULL;
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+}
+
+static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
+{
+ return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+}
+
+static inline void
+mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
+ struct mlx5e_mpw_info *wi,
+ u32 wqe_offset, u32 len)
+{
+ dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
+ len, DMA_FROM_DEVICE);
+}
+
+static inline void
+mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
+ struct mlx5e_mpw_info *wi,
+ u32 wqe_offset, u32 len)
+{
+ /* No DMA pre-sync is needed for fragmented MPWQE */
+}
+
+static inline void
+mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset,
+ u32 len)
+{
+ unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
+
+ wi->skbs_frags[page_idx]++;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ &wi->dma_info.page[page_idx], frag_offset,
+ len, truesize);
+}
+
+static inline void
+mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset,
+ u32 len)
+{
+ unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
+
+ dma_sync_single_for_cpu(rq->pdev,
+ wi->umr.dma_info[page_idx].addr + frag_offset,
+ len, DMA_FROM_DEVICE);
+ wi->skbs_frags[page_idx]++;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ wi->umr.dma_info[page_idx].page, frag_offset,
+ len, truesize);
+}
+
+static inline void
+mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen)
+{
+ struct page *page = &wi->dma_info.page[page_idx];
+
+ skb_copy_to_linear_data(skb, page_address(page) + offset,
+ ALIGN(headlen, sizeof(long)));
+}
+
+static inline void
+mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen)
+{
+ u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
+ unsigned int len;
+
+ /* Aligning len to sizeof(long) optimizes memcpy performance */
+ len = ALIGN(headlen_pg, sizeof(long));
+ dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data_offset(skb, 0,
+ page_address(dma_info->page) + offset,
+ len);
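+ /* The header may straddle a page boundary; if so, copy the
+ * remainder from the next page.
+ */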
+ if (unlikely(offset + headlen > PAGE_SIZE)) {
+ dma_info++;
+ headlen_pg = len;
+ len = ALIGN(headlen - headlen_pg, sizeof(long));
+ dma_sync_single_for_cpu(pdev, dma_info->addr, len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data_offset(skb, headlen_pg,
+ page_address(dma_info->page),
+ len);
+ }
+}
+
+static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+{
+ return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+ wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+}
+
+static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_sq *sq,
+ struct mlx5e_umr_wqe *wqe,
+ u16 ix)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_data_seg *dseg = &wqe->data;
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+ u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+
+ memset(wqe, 0, sizeof(*wqe));
+ cseg->opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->imm = rq->umr_mkey_be;
+
+ ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+ ucseg->klm_octowords =
+ cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+ ucseg->bsf_octowords =
+ cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+ ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+ dseg->lkey = sq->mkey_be;
+ dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct mlx5e_sq *sq = &rq->channel->icosq;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_umr_wqe *wqe;
+ u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
+ u16 pi;
+
+ /* fill sq edge with nops to avoid wqe wrap around */
+ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+ sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
+ sq->ico_wqe_info[pi].num_wqebbs = 1;
+ mlx5e_send_nop(sq, true);
+ }
+
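+ /* The UMR WQE goes on the channel's internal control (ICO) SQ;
+ * its completion (see mlx5e_poll_ico_cq) re-enables RX posting.
+ */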
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ mlx5e_build_umr_wqe(rq, sq, wqe, ix);
+ sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
+ sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
+ sq->pc += num_wqebbs;
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
+
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the mtt array, we allocate
+ * a little more.
+ */
+ return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+ MLX5_UMR_MTT_ALIGNMENT);
+}
+
+static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi,
+ int i)
+{
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ wi->umr.dma_info[i].page = page;
+ wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
+ put_page(page);
+ return -ENOMEM;
+ }
+ wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);
+
+ return 0;
+}
+
+static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe,
+ u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+ int i;
+
+ wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
+ MLX5_MPWRQ_PAGES_PER_WQE,
+ GFP_ATOMIC);
+ if (unlikely(!wi->umr.dma_info))
+ goto err_out;
+
+ /* We allocate more than mtt_sz as we will align the pointer */
+ wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
+ GFP_ATOMIC);
+ if (unlikely(!wi->umr.mtt_no_align))
+ goto err_free_umr;
+
+ wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
+ wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
+ PCI_DMA_TODEVICE);
+ if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
+ goto err_free_mtt;
+
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
+ goto err_unmap;
+ page_ref_add(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq));
+ wi->skbs_frags[i] = 0;
+ }
+
+ wi->consumed_strides = 0;
+ wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
+ wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
+ wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
+ wi->free_wqe = mlx5e_free_rx_fragmented_mpwqe;
+ wqe->data.lkey = rq->umr_mkey_be;
+ wqe->data.addr = cpu_to_be64(dma_offset);
+
+ return 0;
+
+err_unmap:
+ while (--i >= 0) {
+ dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ page_ref_sub(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq));
+ put_page(wi->umr.dma_info[i].page);
+ }
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+
+err_free_mtt:
+ kfree(wi->umr.mtt_no_align);
+
+err_free_umr:
+ kfree(wi->umr.dma_info);
+
+err_out:
+ return -ENOMEM;
+}
+
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi)
+{
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int i;
+
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ page_ref_sub(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
+ put_page(wi->umr.dma_info[i].page);
+ }
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+ kfree(wi->umr.mtt_no_align);
+ kfree(wi->umr.dma_info);
+}
+
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+ clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+ mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+ rq->stats.mpwqe_frag++;
+
+ /* ensure wqes are visible to device before updating doorbell record */
+ dma_wmb();
+
+ mlx5_wq_ll_update_db_record(wq);
+}
+
+static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe,
+ u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ gfp_t gfp_mask;
+ int i;
+
+ gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
+ wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+ MLX5_MPWRQ_WQE_PAGE_ORDER);
+ if (unlikely(!wi->dma_info.page))
+ return -ENOMEM;
+
+ wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
+ rq->wqe_sz, PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
+ put_page(wi->dma_info.page);
+ return -ENOMEM;
+ }
+
+ /* We split the high-order page into order-0 ones and manage their
+ * reference counters to minimize the memory held by small skb fragments
+ */
+ split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ page_ref_add(&wi->dma_info.page[i],
+ mlx5e_mpwqe_strides_per_page(rq));
+ wi->skbs_frags[i] = 0;
+ }
+
+ wi->consumed_strides = 0;
+ wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
+ wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
+ wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
+ wi->free_wqe = mlx5e_free_rx_linear_mpwqe;
+ wqe->data.lkey = rq->mkey_be;
+ wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
+
+ return 0;
+}
+
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi)
+{
+ int i;
+
+ dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
+ PCI_DMA_FROMDEVICE);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ page_ref_sub(&wi->dma_info.page[i],
+ mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
+ put_page(&wi->dma_info.page[i]);
+ }
+}
+
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ int err;
+
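+ /* Prefer the cheap linear (high-order page) allocation; on failure
+ * fall back to the fragmented path, which must complete a UMR
+ * before the WQE is usable, hence the -EBUSY return.
+ */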
+ err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
+ if (unlikely(err)) {
+ err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
+ if (unlikely(err))
+ return err;
+ set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+ mlx5e_post_umr_wqe(rq, ix);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+
+ wi->free_wqe(rq, wi);
+}
+
+void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_ix_be;
+ u16 wqe_ix;
+
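+ /* Walk the linked-list RQ from its tail and release every
+ * outstanding WQE.
+ */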
+ while (!mlx5_wq_ll_is_empty(wq)) {
+ wqe_ix_be = *wq->tail_next;
+ wqe_ix = be16_to_cpu(wqe_ix_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+ rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+ &wqe->next.next_wqe_index);
+ }
+}
+
+#define RQ_CANNOT_POST(rq) \
+ (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
+ test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
- if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+ if (unlikely(RQ_CANNOT_POST(rq)))
return false;
while (!mlx5_wq_ll_is_full(wq)) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+ int err;
- if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+ err = rq->alloc_wqe(rq, wqe, wq->head);
+ if (unlikely(err)) {
+ if (err != -EBUSY)
+ rq->stats.buff_alloc_err++;
break;
+ }
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
}
@@ -101,7 +645,8 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !mlx5_wq_ll_is_full(wq);
}
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
@@ -112,7 +657,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
(CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
- u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+ u16 tot_len = cqe_bcnt - ETH_HLEN;
if (eth->h_proto == htons(ETH_P_IP)) {
tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
@@ -176,35 +721,43 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (lro) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if (likely(is_first_ethertype_ip(skb))) {
+ return;
+ }
+
+ if (is_first_ethertype_ip(skb)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- rq->stats.csum_sw++;
- } else {
- goto csum_none;
+ rq->stats.csum_complete++;
+ return;
}
- return;
-
+ if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (cqe_is_tunneled(cqe)) {
+ skb->csum_level = 1;
+ skb->encapsulation = 1;
+ rq->stats.csum_unnecessary_inner++;
+ }
+ return;
+ }
csum_none:
skb->ip_summed = CHECKSUM_NONE;
rq->stats.csum_none++;
}
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
- u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
struct mlx5e_tstamp *tstamp = rq->tstamp;
int lro_num_seg;
- skb_put(skb, cqe_bcnt);
-
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
- mlx5e_lro_update_hdr(skb, cqe);
+ mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
@@ -213,10 +766,6 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
- mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
-
- skb->protocol = eth_type_trans(skb, netdev);
-
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -227,52 +776,168 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
be16_to_cpu(cqe->vlan_info));
skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
+
+ mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
+ skb->protocol = eth_type_trans(skb, netdev);
+}
+
+static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ rq->stats.packets++;
+ rq->stats.bytes += cqe_bcnt;
+ mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+}
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_rx_wqe *wqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ skb = rq->skb[wqe_counter];
+ prefetch(skb->data);
+ rq->skb[wqe_counter] = NULL;
+
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ dev_kfree_skb(skb);
+ goto wq_ll_pop;
+ }
+
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ skb_put(skb, cqe_bcnt);
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_mpw_info *wi,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz);
+ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+ u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz;
+ u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
+ u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u32 head_page_idx = page_idx;
+ u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+ u32 frag_offset = head_offset + headlen;
+ u16 byte_cnt = cqe_bcnt - headlen;
+
+ if (unlikely(frag_offset >= PAGE_SIZE)) {
+ page_idx++;
+ frag_offset -= PAGE_SIZE;
+ }
+ wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);
+
+ while (byte_cnt) {
+ u32 pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+
+ wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset,
+ pg_consumed_bytes);
+ byte_cnt -= pg_consumed_bytes;
+ frag_offset = 0;
+ page_idx++;
+ }
+ /* copy header */
+ wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
+ headlen);
+ /* skb linear part was allocated with headlen and aligned to long */
+ skb->tail += headlen;
+ skb->len += headlen;
+}
+
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+ struct sk_buff *skb;
+ u16 cqe_bcnt;
+
+ wi->consumed_strides += cstrides;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ rq->stats.mpwqe_filler++;
+ goto mpwrq_cqe_out;
+ }
+
+ skb = napi_alloc_skb(rq->cq.napi,
+ ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
+ sizeof(long)));
+ if (unlikely(!skb)) {
+ rq->stats.buff_alloc_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ prefetch(skb->data);
+ cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+ mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
+ return;
+
+ wi->free_wqe(rq, wi);
+ mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
- int work_done;
+ int work_done = 0;
- for (work_done = 0; work_done < budget; work_done++) {
- struct mlx5e_rx_wqe *wqe;
- struct mlx5_cqe64 *cqe;
- struct sk_buff *skb;
- __be16 wqe_counter_be;
- u16 wqe_counter;
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+ return 0;
- cqe = mlx5e_get_cqe(cq);
- if (!cqe)
- break;
+ if (cq->decmprs_left)
+ work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
- mlx5_cqwq_pop(&cq->wq);
+ for (; work_done < budget; work_done++) {
+ struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);
- wqe_counter_be = cqe->wqe_counter;
- wqe_counter = be16_to_cpu(wqe_counter_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- skb = rq->skb[wqe_counter];
- prefetch(skb->data);
- rq->skb[wqe_counter] = NULL;
-
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
+ if (!cqe)
+ break;
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
- rq->stats.wqe_err++;
- dev_kfree_skb(skb);
- goto wq_ll_pop;
+ if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+ work_done +=
+ mlx5e_decompress_cqes_start(rq, cq,
+ budget - work_done);
+ continue;
}
- mlx5e_build_rx_skb(cqe, rq, skb);
- rq->stats.packets++;
- rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
- napi_gro_receive(cq->napi, skb);
+ mlx5_cqwq_pop(&cq->wq);
-wq_ll_pop:
- mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
- &wqe->next.next_wqe_index);
+ rq->handle_rx_cqe(rq, cqe);
}
mlx5_cqwq_update_db_record(&cq->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
new file mode 100644
index 000000000..fcd490cc5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_STATS_H__
+#define __MLX5_EN_STATS_H__
+
+#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
+ (*(u64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
+ be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
+ (*(u32 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
+ be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+
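+/* Counters are read generically through the byte offsets recorded in
+ * struct counter_desc; the _BE variants convert from the device's
+ * big-endian layout.
+ */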
+#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+
+struct counter_desc {
+ char format[ETH_GSTRING_LEN];
+ int offset; /* Byte offset */
+};
+
+struct mlx5e_sw_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_tso_packets;
+ u64 tx_tso_bytes;
+ u64 tx_tso_inner_packets;
+ u64 tx_tso_inner_bytes;
+ u64 rx_lro_packets;
+ u64 rx_lro_bytes;
+ u64 rx_csum_unnecessary;
+ u64 rx_csum_none;
+ u64 rx_csum_complete;
+ u64 rx_csum_unnecessary_inner;
+ u64 tx_csum_partial;
+ u64 tx_csum_partial_inner;
+ u64 tx_queue_stopped;
+ u64 tx_queue_wake;
+ u64 tx_queue_dropped;
+ u64 rx_wqe_err;
+ u64 rx_mpwqe_filler;
+ u64 rx_mpwqe_frag;
+ u64 rx_buff_alloc_err;
+ u64 rx_cqe_compress_blks;
+ u64 rx_cqe_compress_pkts;
+
+ /* Special handling counters */
+ u64 link_down_events_phy;
+};
+
+static const struct counter_desc sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
+};
+
+struct mlx5e_qcounter_stats {
+ u32 rx_out_of_buffer;
+};
+
+static const struct counter_desc q_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
+};
+
+#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
+#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
+ vstats->query_vport_out, c)
+
+struct mlx5e_vport_stats {
+ __be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
+};
+
+static const struct counter_desc vport_stats_desc[] = {
+ { "rx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(received_eth_unicast.packets) },
+ { "rx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_unicast.octets) },
+ { "tx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
+ { "tx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
+ { "rx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(received_eth_multicast.packets) },
+ { "rx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_multicast.octets) },
+ { "tx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
+ { "tx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
+ { "rx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
+ { "rx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
+ { "tx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
+ { "tx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+};
+
+#define PPORT_802_3_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_802_3_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
+ counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
+ counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_GET(pstats, prio, c) \
+ MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
+ counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define NUM_PPORT_PRIO 8
+
+struct mlx5e_pport_stats {
+ __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+};
+
+static const struct counter_desc pport_802_3_stats_desc[] = {
+ { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+ { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+ { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+ { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+ { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+ { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+ { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+ { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+ { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+ { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+ { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+ { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+ { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+ { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+ { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+ { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+ { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+ { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+};
+
+static const struct counter_desc pport_2863_stats_desc[] = {
+ { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+ { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+ { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
+};
+
+static const struct counter_desc pport_2819_stats_desc[] = {
+ { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+ { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+ { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+ { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+ { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+ { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+ { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+ { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+ { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+ { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+ { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+ { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+ { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+};
+
+static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
+ { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+ { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+ { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
+};
+
+static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
+ { "rx_prio%d_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_prio%d_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_prio%d_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_prio%d_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+};
+
+struct mlx5e_rq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 csum_complete;
+ u64 csum_unnecessary_inner;
+ u64 csum_none;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 wqe_err;
+ u64 mpwqe_filler;
+ u64 mpwqe_frag;
+ u64 buff_alloc_err;
+ u64 cqe_compress_blks;
+ u64 cqe_compress_pkts;
+};
+
+static const struct counter_desc rq_stats_desc[] = {
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+};
+
+struct mlx5e_sq_stats {
+ /* commonly accessed in data path */
+ u64 packets;
+ u64 bytes;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 tso_inner_packets;
+ u64 tso_inner_bytes;
+ u64 csum_partial_inner;
+ u64 nop;
+ /* less likely accessed in data path */
+ u64 csum_none;
+ u64 stopped;
+ u64 wake;
+ u64 dropped;
+};
+
+static const struct counter_desc sq_stats_desc[] = {
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+};
+
+#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
+#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
+#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
+#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
+#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
+#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
+#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
+ ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
+#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
+ ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \
+ NUM_PPORT_2863_COUNTERS + \
+ NUM_PPORT_2819_COUNTERS + \
+ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
+ NUM_PPORT_PRIO)
+#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
+#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+
+struct mlx5e_stats {
+ struct mlx5e_sw_stats sw;
+ struct mlx5e_qcounter_stats qcnt;
+ struct mlx5e_vport_stats vport;
+ struct mlx5e_pport_stats pport;
+};
+
+#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b3de09f13..704c3d304 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -46,43 +46,65 @@ struct mlx5e_tc_flow {
struct mlx5_flow_rule *rule;
};
-#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024
-#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4
+#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
+#define MLX5E_TC_TABLE_NUM_GROUPS 4
static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
u32 *match_c, u32 *match_v,
u32 action, u32 flow_tag)
{
- struct mlx5_flow_destination dest = {
- .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
- {.ft = priv->fts.vlan.t},
- };
+ struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_fc *counter = NULL;
struct mlx5_flow_rule *rule;
bool table_created = false;
- if (IS_ERR_OR_NULL(priv->fts.tc.t)) {
- priv->fts.tc.t =
- mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0,
- MLX5E_TC_FLOW_TABLE_NUM_ENTRIES,
- MLX5E_TC_FLOW_TABLE_NUM_GROUPS);
- if (IS_ERR(priv->fts.tc.t)) {
+ if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.vlan.ft.t;
+ } else {
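+ /* Drop rules have no forwarding destination; attach a flow
+ * counter instead so rule hits can still be reported to tc.
+ */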
+ counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(counter))
+ return ERR_CAST(counter);
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = counter;
+ }
+
+ if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
+ priv->fs.tc.t =
+ mlx5_create_auto_grouped_flow_table(priv->fs.ns,
+ MLX5E_TC_PRIO,
+ MLX5E_TC_TABLE_NUM_ENTRIES,
+ MLX5E_TC_TABLE_NUM_GROUPS,
+ 0);
+ if (IS_ERR(priv->fs.tc.t)) {
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- return ERR_CAST(priv->fts.tc.t);
+ rule = ERR_CAST(priv->fs.tc.t);
+ goto err_create_ft;
}
table_created = true;
}
- rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS,
+ rule = mlx5_add_flow_rule(priv->fs.tc.t, MLX5_MATCH_OUTER_HEADERS,
match_c, match_v,
action, flow_tag,
- action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
+ &dest);
+
+ if (IS_ERR(rule))
+ goto err_add_rule;
+
+ return rule;
- if (IS_ERR(rule) && table_created) {
- mlx5_destroy_flow_table(priv->fts.tc.t);
- priv->fts.tc.t = NULL;
+err_add_rule:
+ if (table_created) {
+ mlx5_destroy_flow_table(priv->fs.tc.t);
+ priv->fs.tc.t = NULL;
}
+err_create_ft:
+ mlx5_fc_destroy(dev, counter);
return rule;
}
@@ -90,11 +112,17 @@ static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule)
{
+ struct mlx5_fc *counter = NULL;
+
+ counter = mlx5_flow_rule_counter(rule);
+
mlx5_del_flow_rule(rule);
+ mlx5_fc_destroy(priv->mdev, counter);
+
if (!mlx5e_tc_num_filters(priv)) {
- mlx5_destroy_flow_table(priv->fts.tc.t);
- priv->fts.tc.t = NULL;
+ mlx5_destroy_flow_table(priv->fs.tc.t);
+ priv->fs.tc.t = NULL;
}
}
@@ -284,6 +312,9 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (is_tcf_gact_shot(a)) {
*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ if (MLX5_CAP_FLOWTABLE(priv->mdev,
+ flow_table_properties_nic_receive.flow_counter))
+ *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
@@ -310,7 +341,7 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
u32 *match_c;
u32 *match_v;
int err = 0;
@@ -376,7 +407,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_flow *flow;
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
@@ -392,6 +423,34 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
return 0;
}
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_flow *flow;
+ struct tc_action *a;
+ struct mlx5_fc *counter;
+ u64 bytes;
+ u64 packets;
+ u64 lastuse;
+
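+ /* Stats are served from the driver's periodically refreshed
+ * counter cache; no firmware query is issued on this path.
+ */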
+ flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+ tc->ht_params);
+ if (!flow)
+ return -EINVAL;
+
+ counter = mlx5_flow_rule_counter(flow->rule);
+ if (!counter)
+ return 0;
+
+ mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+
+ tc_for_each_action(a, f->exts)
+ tcf_action_stats_update(a, bytes, packets, lastuse);
+
+ return 0;
+}
+
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
.head_offset = offsetof(struct mlx5e_tc_flow, node),
.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
@@ -401,7 +460,7 @@ static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
@@ -418,12 +477,12 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
- if (!IS_ERR_OR_NULL(priv->fts.tc.t)) {
- mlx5_destroy_flow_table(priv->fts.tc.t);
- priv->fts.tc.t = NULL;
+ if (!IS_ERR_OR_NULL(tc->t)) {
+ mlx5_destroy_flow_table(tc->t);
+ tc->t = NULL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index d677428dc..34bf903fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -43,9 +43,12 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
int mlx5e_delete_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f);
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f);
+
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
{
- return atomic_read(&priv->fts.tc.ht.nelems);
+ return atomic_read(&priv->fs.tc.ht.nelems);
}
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 1ffc7cb6f..5740b465e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -54,10 +54,11 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
sq->skb[pi] = NULL;
sq->pc++;
+ sq->stats.nop++;
if (notify_hw) {
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe, 0);
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}
}
@@ -109,8 +110,20 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
- int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
- skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
+ int up = 0;
+
+ if (!netdev_get_num_tc(dev))
+ return channel_ix;
+
+ if (skb_vlan_tag_present(skb))
+ up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+ /* channel_ix can be larger than num_channels since
+ * dev->real_num_tx_queues = num_channels * num_tc
+ */
+ if (channel_ix >= priv->params.num_channels)
+ channel_ix = reciprocal_scale(channel_ix,
+ priv->params.num_channels);
return priv->channeltc_to_txq_map[channel_ix][up];
}
@@ -122,7 +135,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
-#define MLX5E_MIN_INLINE ETH_HLEN
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
if (bf) {
u16 ihs = skb_headlen(skb);
@@ -134,7 +147,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
return skb_headlen(skb);
}
- return MLX5E_MIN_INLINE;
+ return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -191,12 +204,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
if (skb->encapsulation) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
MLX5_ETH_WQE_L4_INNER_CSUM;
- sq->stats.csum_offload_inner++;
+ sq->stats.csum_partial_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
}
} else
- sq->stats.csum_offload_none++;
+ sq->stats.csum_none++;
if (sq->cc != sq->prev_cc) {
sq->prev_cc = sq->cc;
@@ -309,14 +322,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
bf_sz = wi->num_wqebbs << 3;
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe, bf_sz);
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
}
/* fill sq edge with nops to avoid wqe wrap around */
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
- sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+ if (bf)
+ sq->bf_budget--;
sq->stats.packets++;
sq->stats.bytes += num_bytes;
@@ -339,6 +353,35 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct sk_buff *skb;
+ u16 ci;
+ int i;
+
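+ /* Walk the SQ from cc to pc, unmapping DMA and freeing any
+ * in-flight skbs; used when the SQ is torn down after a TX timeout.
+ */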
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+ wi = &sq->wqe_info[ci];
+
+ if (!skb) { /* nop */
+ sq->cc++;
+ continue;
+ }
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+
+ dev_kfree_skb_any(skb);
+ sq->cc += wi->num_wqebbs;
+ }
+}
+
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
@@ -350,6 +393,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
+ if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+ return false;
+
npkts = 0;
nbytes = 0;
@@ -387,7 +433,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wi = &sq->wqe_info[ci];
if (unlikely(!skb)) { /* nop */
- sq->stats.nop++;
sqcc++;
continue;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 9bb4395ac..c38781fa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -49,6 +49,60 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
return cqe;
}
+static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5_wq_cyc *wq;
+ struct mlx5_cqe64 *cqe;
+ struct mlx5e_sq *sq;
+ u16 sqcc;
+
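+ /* The ICO (internal control operations) SQ carries driver-internal
+ * NOP and UMR WQEs; a UMR completion finalizes the posting of a
+ * fragmented MPWQE on the paired RQ.
+ */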
+ cqe = mlx5e_get_cqe(cq);
+ if (likely(!cqe))
+ return;
+
+ sq = container_of(cq, struct mlx5e_sq, cq);
+ wq = &sq->wq;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ do {
+ u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
+ struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci];
+
+ mlx5_cqwq_pop(&cq->wq);
+ sqcc += icowi->num_wqebbs;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+ WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
+ cqe->op_own);
+ break;
+ }
+
+ switch (icowi->opcode) {
+ case MLX5_OPCODE_NOP:
+ break;
+ case MLX5_OPCODE_UMR:
+ mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq);
+ break;
+ default:
+ WARN_ONCE(true,
+ "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+ icowi->opcode);
+ }
+
+ } while ((cqe = mlx5e_get_cqe(cq)));
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -64,6 +118,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
+
+ mlx5e_poll_ico_cq(&c->icosq.cq);
+
busy |= mlx5e_post_rx_wqes(&c->rq);
if (busy)
@@ -80,6 +137,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
mlx5e_cq_arm(&c->rq.cq);
+ mlx5e_cq_arm(&c->icosq.cq);
return work_done;
}
@@ -89,7 +147,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
- barrier();
napi_schedule(cq->napi);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 18fccec72..0e30602ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -202,7 +202,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
struct mlx5_eqe *eqe;
int eqes_found = 0;
int set_ci = 0;
- u32 cqn;
+ u32 cqn = -1;
u32 rsn;
u8 port;
@@ -320,6 +320,9 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
eq_update_ci(eq, 1);
+ if (cqn != -1)
+ tasklet_schedule(&eq->tasklet_ctx.task);
+
return eqes_found;
}
@@ -403,6 +406,12 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
if (err)
goto err_irq;
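+ /* CQ completions seen while processing this EQ are queued on
+ * tasklet_ctx.list and dispatched from a tasklet once the EQ
+ * interrupt has been handled.
+ */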
+ INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+ INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+ spin_lock_init(&eq->tasklet_ctx.lock);
+ tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
+ (unsigned long)&eq->tasklet_ctx);
+
/* EQs are created in ARMED state
*/
eq_update_ci(eq, 1);
@@ -436,6 +445,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
synchronize_irq(eq->irqn);
+ tasklet_disable(&eq->tasklet_ctx.task);
mlx5_buf_free(dev, &eq->buf);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bc3d9f8a7..aebbd6ccb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -77,16 +77,20 @@ struct vport_addr {
u8 action;
u32 vport;
struct mlx5_flow_rule *flow_rule; /* SRIOV only */
+ /* A flag indicating that the mac was added due to an mc promiscuous vport */
+ bool mc_promisc;
};
enum {
UC_ADDR_CHANGE = BIT(0),
MC_ADDR_CHANGE = BIT(1),
+ PROMISC_CHANGE = BIT(3),
};
/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
- MC_ADDR_CHANGE)
+ MC_ADDR_CHANGE | \
+ PROMISC_CHANGE)
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
@@ -116,6 +120,9 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
if (events_mask & MC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_mc_address_change, 1);
+ if (events_mask & PROMISC_CHANGE)
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ event_on_promisc_change, 1);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
@@ -323,30 +330,45 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
/* E-Switch FDB */
static struct mlx5_flow_rule *
-esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
+ u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
- int match_header = MLX5_MATCH_OUTER_HEADERS;
- struct mlx5_flow_destination dest;
+ int match_header = (is_zero_ether_addr(mac_c) ? 0 :
+ MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_rule *flow_rule = NULL;
+ struct mlx5_flow_destination dest;
+ void *mv_misc = NULL;
+ void *mc_misc = NULL;
+ u8 *dmac_v = NULL;
+ u8 *dmac_c = NULL;
u32 *match_v;
u32 *match_c;
- u8 *dmac_v;
- u8 *dmac_c;
+ if (rx_rule)
+ match_header |= MLX5_MATCH_MISC_PARAMETERS;
match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
if (!match_v || !match_c) {
pr_warn("FDB: Failed to alloc match parameters\n");
goto out;
}
+
dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers.dmac_47_16);
dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
outer_headers.dmac_47_16);
- ether_addr_copy(dmac_v, mac);
- /* Match criteria mask */
- memset(dmac_c, 0xff, 6);
+ if (match_header & MLX5_MATCH_OUTER_HEADERS) {
+ ether_addr_copy(dmac_v, mac_v);
+ ether_addr_copy(dmac_c, mac_c);
+ }
+
+ if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
+ mv_misc = MLX5_ADDR_OF(fte_match_param, match_v, misc_parameters);
+ mc_misc = MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters);
+ MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
+ MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
+ }
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport;
@@ -361,7 +383,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
match_v,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
- if (IS_ERR_OR_NULL(flow_rule)) {
+ if (IS_ERR(flow_rule)) {
pr_warn(
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
@@ -373,6 +395,39 @@ out:
return flow_rule;
}
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+
+ eth_broadcast_addr(mac_c);
+ return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
+}
+
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+ u8 mac_v[ETH_ALEN];
+
+ eth_zero_addr(mac_c);
+ eth_zero_addr(mac_v);
+ mac_c[0] = 0x01;
+ mac_v[0] = 0x01;
+ return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
+}
+
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+ u8 mac_v[ETH_ALEN];
+
+ eth_zero_addr(mac_c);
+ eth_zero_addr(mac_v);
+ return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
+}
+
static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -401,34 +456,80 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- fdb = mlx5_create_flow_table(root_ns, 0, table_size);
- if (IS_ERR_OR_NULL(fdb)) {
+ fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
+ if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
}
+ esw->fdb_table.fdb = fdb;
+ /* Addresses group: Full match unicast/multicast addresses */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_OUTER_HEADERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ /* Preserve 2 entries for allmulti and promisc rules */
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
eth_broadcast_addr(dmac);
-
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
}
-
esw->fdb_table.addr_grp = g;
- esw->fdb_table.fdb = fdb;
+
+ /* Allmulti group: One rule that forwards any mcast traffic */
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
+ eth_zero_addr(dmac);
+ dmac[0] = 0x01;
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.allmulti_grp = g;
+
+ /* Promiscuous group:
+ * One rule that forwards all unmatched traffic from the previous groups
+ */
+ eth_zero_addr(dmac);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.promisc_grp = g;
+
out:
- kfree(flow_group_in);
- if (err && !IS_ERR_OR_NULL(fdb))
- mlx5_destroy_flow_table(fdb);
+ if (err) {
+ if (!IS_ERR_OR_NULL(esw->fdb_table.allmulti_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
+ esw->fdb_table.allmulti_grp = NULL;
+ }
+ if (!IS_ERR_OR_NULL(esw->fdb_table.addr_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
+ esw->fdb_table.addr_grp = NULL;
+ }
+ if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+ esw->fdb_table.fdb = NULL;
+ }
+ }
+
+ kvfree(flow_group_in);
return err;
}
@@ -438,10 +539,14 @@ static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
return;
esw_debug(esw->dev, "Destroy FDB Table\n");
+ mlx5_destroy_flow_group(esw->fdb_table.promisc_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.allmulti_grp);
mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
mlx5_destroy_flow_table(esw->fdb_table.fdb);
esw->fdb_table.fdb = NULL;
esw->fdb_table.addr_grp = NULL;
+ esw->fdb_table.allmulti_grp = NULL;
+ esw->fdb_table.promisc_grp = NULL;
}
/* E-Switch vport UC/MC lists management */
@@ -511,6 +616,53 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
return 0;
}
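+/* Propagate a multicast address add/del to every vport that is currently
+ * in allmulti (mc promiscuous) mode, except the vport the change came from.
+ */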
+static void update_allmulti_vports(struct mlx5_eswitch *esw,
+ struct vport_addr *vaddr,
+ struct esw_mc_addr *esw_mc)
+{
+ u8 *mac = vaddr->node.addr;
+ u32 vport_idx = 0;
+
+ for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
+ struct mlx5_vport *vport = &esw->vports[vport_idx];
+ struct hlist_head *vport_hash = vport->mc_list;
+ struct vport_addr *iter_vaddr =
+ l2addr_hash_find(vport_hash,
+ mac,
+ struct vport_addr);
+ if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
+ vaddr->vport == vport_idx)
+ continue;
+ switch (vaddr->action) {
+ case MLX5_ACTION_ADD:
+ if (iter_vaddr)
+ continue;
+ iter_vaddr = l2addr_hash_add(vport_hash, mac,
+ struct vport_addr,
+ GFP_KERNEL);
+ if (!iter_vaddr) {
+ esw_warn(esw->dev,
+ "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
+ mac, vport_idx);
+ continue;
+ }
+ iter_vaddr->vport = vport_idx;
+ iter_vaddr->flow_rule =
+ esw_fdb_set_vport_rule(esw,
+ mac,
+ vport_idx);
+ iter_vaddr->mc_promisc = true;
+ break;
+ case MLX5_ACTION_DEL:
+ if (!iter_vaddr)
+ continue;
+ mlx5_del_flow_rule(iter_vaddr->flow_rule);
+ l2addr_hash_del(iter_vaddr);
+ break;
+ }
+ }
+}
+
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
struct hlist_head *hash = esw->mc_table;
@@ -531,8 +683,17 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
+
+ /* Add this multicast mac to all the mc promiscuous vports */
+ update_allmulti_vports(esw, vaddr, esw_mc);
+
add:
- esw_mc->refcnt++;
+ /* If the multicast mac was added as a result of an mc promiscuous
+ * vport, don't increment the multicast ref count.
+ */
+ if (!vaddr->mc_promisc)
+ esw_mc->refcnt++;
+
/* Forward MC MAC to vport */
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev,
@@ -568,9 +729,15 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
mlx5_del_flow_rule(vaddr->flow_rule);
vaddr->flow_rule = NULL;
- if (--esw_mc->refcnt)
+ /* If the multicast mac was added as a result of an mc promiscuous
+ * vport, don't decrement the multicast ref count.
+ */
+ if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
return 0;
+ /* Remove this multicast mac from all the mc promiscuous vports */
+ update_allmulti_vports(esw, vaddr, esw_mc);
+
if (esw_mc->uplink_rule)
mlx5_del_flow_rule(esw_mc->uplink_rule);
@@ -643,10 +810,13 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr->action = MLX5_ACTION_DEL;
}
+ if (!vport->enabled)
+ goto out;
+
err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
mac_list, &size);
if (err)
- return;
+ goto out;
esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
vport_num, is_uc ? "UC" : "MC", size);
@@ -660,6 +830,24 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
if (addr) {
addr->action = MLX5_ACTION_NONE;
+ /* If this mac was previously added because of allmulti
+ * promiscuous rx mode, it is now converted to an original
+ * vport mac.
+ */
+ if (addr->mc_promisc) {
+ struct esw_mc_addr *esw_mc =
+ l2addr_hash_find(esw->mc_table,
+ mac_list[i],
+ struct esw_mc_addr);
+ if (!esw_mc) {
+ esw_warn(esw->dev,
+ "Failed to MAC(%pM) in mcast DB\n",
+ mac_list[i]);
+ continue;
+ }
+ esw_mc->refcnt++;
+ addr->mc_promisc = false;
+ }
continue;
}
@@ -674,13 +862,121 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr->vport = vport_num;
addr->action = MLX5_ACTION_ADD;
}
+out:
kfree(mac_list);
}
-static void esw_vport_change_handler(struct work_struct *work)
+/* Sync the vport MC list from the esw mcast DB for mc promiscuous vports.
+ * Must be called after esw_update_vport_addr_list.
+ */
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct l2addr_node *node;
+ struct vport_addr *addr;
+ struct hlist_head *hash;
+ struct hlist_node *tmp;
+ int hi;
+
+ hash = vport->mc_list;
+
+ for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
+ u8 *mac = node->addr;
+
+ addr = l2addr_hash_find(hash, mac, struct vport_addr);
+ if (addr) {
+ if (addr->action == MLX5_ACTION_DEL)
+ addr->action = MLX5_ACTION_NONE;
+ continue;
+ }
+ addr = l2addr_hash_add(hash, mac, struct vport_addr,
+ GFP_KERNEL);
+ if (!addr) {
+ esw_warn(esw->dev,
+ "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
+ mac, vport_num);
+ continue;
+ }
+ addr->vport = vport_num;
+ addr->action = MLX5_ACTION_ADD;
+ addr->mc_promisc = true;
+ }
+}
+
+/* Apply vport rx mode to HW FDB table */
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
+ bool promisc, bool mc_promisc)
+{
+ struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+
+ if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
+ goto promisc;
+
+ if (mc_promisc) {
+ vport->allmulti_rule =
+ esw_fdb_set_vport_allmulti_rule(esw, vport_num);
+ if (!allmulti_addr->uplink_rule)
+ allmulti_addr->uplink_rule =
+ esw_fdb_set_vport_allmulti_rule(esw,
+ UPLINK_VPORT);
+ allmulti_addr->refcnt++;
+ } else if (vport->allmulti_rule) {
+ mlx5_del_flow_rule(vport->allmulti_rule);
+ vport->allmulti_rule = NULL;
+
+ if (--allmulti_addr->refcnt > 0)
+ goto promisc;
+
+ if (allmulti_addr->uplink_rule)
+ mlx5_del_flow_rule(allmulti_addr->uplink_rule);
+ allmulti_addr->uplink_rule = NULL;
+ }
+
+promisc:
+ if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
+ return;
+
+ if (promisc) {
+ vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
+ vport_num);
+ } else if (vport->promisc_rule) {
+ mlx5_del_flow_rule(vport->promisc_rule);
+ vport->promisc_rule = NULL;
+ }
+}
+
+/* Sync vport rx mode from vport context */
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ int promisc_all = 0;
+ int promisc_uc = 0;
+ int promisc_mc = 0;
+ int err;
+
+ err = mlx5_query_nic_vport_promisc(esw->dev,
+ vport_num,
+ &promisc_uc,
+ &promisc_mc,
+ &promisc_all);
+ if (err)
+ return;
+ esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
+ vport_num, promisc_all, promisc_mc);
+
+ if (!vport->trusted || !vport->enabled) {
+ promisc_uc = 0;
+ promisc_mc = 0;
+ promisc_all = 0;
+ }
+
+ esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
+ (promisc_all || promisc_mc));
+}
+
+static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
- struct mlx5_vport *vport =
- container_of(work, struct mlx5_vport, vport_change_handler);
struct mlx5_core_dev *dev = vport->dev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
u8 mac[ETH_ALEN];
@@ -699,6 +995,15 @@ static void esw_vport_change_handler(struct work_struct *work)
if (vport->enabled_events & MC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_MC);
+ }
+
+ if (vport->enabled_events & PROMISC_CHANGE) {
+ esw_update_vport_rx_mode(esw, vport->vport);
+ if (!IS_ERR_OR_NULL(vport->allmulti_rule))
+ esw_update_vport_mc_promisc(esw, vport->vport);
+ }
+
+ if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
esw_apply_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_MC);
}
@@ -709,15 +1014,477 @@ static void esw_vport_change_handler(struct work_struct *work)
vport->enabled_events);
}
+static void esw_vport_change_handler(struct work_struct *work)
+{
+ struct mlx5_vport *vport =
+ container_of(work, struct mlx5_vport, vport_change_handler);
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ mutex_lock(&esw->state_lock);
+ esw_vport_change_handle_locked(vport);
+ mutex_unlock(&esw->state_lock);
+}
+
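+/* Create the per-vport egress ACL table with its two flow groups: one for
+ * the allowed-vlan rule and one for the catch-all drop rule.
+ */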
+static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *vlan_grp = NULL;
+ struct mlx5_flow_group *drop_grp = NULL;
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ void *match_criteria;
+ u32 *flow_group_in;
+ /* The egress acl table contains 2 rules:
+ * 1) Allow traffic with vlan_tag=vst_vlan_id
+ * 2) Drop all other traffic.
+ */
+ int table_size = 2;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
+ !IS_ERR_OR_NULL(vport->egress.acl))
+ return;
+
+ esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+ return;
+ }
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return;
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(vlan_grp)) {
+ err = PTR_ERR(vlan_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ drop_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(drop_grp)) {
+ err = PTR_ERR(drop_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ vport->egress.acl = acl;
+ vport->egress.drop_grp = drop_grp;
+ vport->egress.allowed_vlans_grp = vlan_grp;
+out:
+ kvfree(flow_group_in);
+ if (err && !IS_ERR_OR_NULL(vlan_grp))
+ mlx5_destroy_flow_group(vlan_grp);
+ if (err && !IS_ERR_OR_NULL(acl))
+ mlx5_destroy_flow_table(acl);
+}
+
+static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+ mlx5_del_flow_rule(vport->egress.allowed_vlan);
+
+ if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
+ mlx5_del_flow_rule(vport->egress.drop_rule);
+
+ vport->egress.allowed_vlan = NULL;
+ vport->egress.drop_rule = NULL;
+}
+
+static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->egress.acl))
+ return;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+ mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
+ mlx5_destroy_flow_group(vport->egress.drop_grp);
+ mlx5_destroy_flow_table(vport->egress.acl);
+ vport->egress.allowed_vlans_grp = NULL;
+ vport->egress.drop_grp = NULL;
+ vport->egress.acl = NULL;
+}
+
+static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ u32 *flow_group_in;
+ /* The ingress acl table contains 4 groups
+ * (2 active rules at the same time -
+ * 1 allow rule from one of the first 3 groups,
+ * 1 drop rule from the last group):
+ * 1) Allow untagged traffic with smac=original mac.
+ * 2) Allow untagged traffic.
+ * 3) Allow traffic with smac=original mac.
+ * 4) Drop all other traffic.
+ */
+ int table_size = 4;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
+ !IS_ERR_OR_NULL(vport->ingress.acl))
+ return;
+
+ esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+ return;
+ }
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return;
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.acl = acl;
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_untagged_spoofchk_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_untagged_only_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_spoofchk_only_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.drop_grp = g;
+
+out:
+ if (err) {
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_spoofchk_only_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_untagged_only_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_untagged_spoofchk_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.acl))
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ }
+
+ kvfree(flow_group_in);
+}
+
+static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
+ mlx5_del_flow_rule(vport->ingress.drop_rule);
+
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+ mlx5_del_flow_rule(vport->ingress.allow_rule);
+
+ vport->ingress.drop_rule = NULL;
+ vport->ingress.allow_rule = NULL;
+}
+
+static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->ingress.acl))
+ return;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
+ mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
+ mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
+ mlx5_destroy_flow_group(vport->ingress.drop_grp);
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+ vport->ingress.drop_grp = NULL;
+ vport->ingress.allow_spoofchk_only_grp = NULL;
+ vport->ingress.allow_untagged_only_grp = NULL;
+ vport->ingress.allow_untagged_spoofchk_grp = NULL;
+}
+
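+/* (Re)build the vport ingress ACL rules: allow only traffic that passes
+ * the configured VST (untagged) and spoofchk (smac) checks, drop the rest.
+ */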
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u8 smac[ETH_ALEN];
+ u32 *match_v;
+ u32 *match_c;
+ int err = 0;
+ u8 *smac_v;
+
+ if (vport->spoofchk) {
+ err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
+ vport->vport, err);
+ return err;
+ }
+
+ if (!is_valid_ether_addr(smac)) {
+ mlx5_core_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+ vport->vport);
+ return -EPERM;
+ }
+ }
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ if (!vport->vlan && !vport->qos && !vport->spoofchk) {
+ esw_vport_disable_ingress_acl(esw, vport);
+ return 0;
+ }
+
+ esw_vport_enable_ingress_acl(esw, vport);
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->vlan, vport->qos);
+
+ match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_v || !match_c) {
+ err = -ENOMEM;
+ esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ if (vport->vlan || vport->qos)
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+
+ if (vport->spoofchk) {
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.smac_15_0);
+ smac_v = MLX5_ADDR_OF(fte_match_param,
+ match_v,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac_v, smac);
+ }
+
+ vport->ingress.allow_rule =
+ mlx5_add_flow_rule(vport->ingress.acl,
+ MLX5_MATCH_OUTER_HEADERS,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+ 0, NULL);
+ if (IS_ERR(vport->ingress.allow_rule)) {
+ err = PTR_ERR(vport->ingress.allow_rule);
+ pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_rule = NULL;
+ goto out;
+ }
+
+ memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ vport->ingress.drop_rule =
+ mlx5_add_flow_rule(vport->ingress.acl,
+ 0,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_DROP,
+ 0, NULL);
+ if (IS_ERR(vport->ingress.drop_rule)) {
+ err = PTR_ERR(vport->ingress.drop_rule);
+ pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.drop_rule = NULL;
+ goto out;
+ }
+
+out:
+ if (err)
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ kfree(match_v);
+ kfree(match_c);
+ return err;
+}
+
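+/* (Re)build the vport egress ACL rules: allow only frames tagged with the
+ * configured VST vlan, then drop everything else.
+ */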
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ u32 *match_v;
+ u32 *match_c;
+ int err = 0;
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+
+ if (!vport->vlan && !vport->qos) {
+ esw_vport_disable_egress_acl(esw, vport);
+ return 0;
+ }
+
+ esw_vport_enable_egress_acl(esw, vport);
+
+ esw_debug(esw->dev,
+ "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->vlan, vport->qos);
+
+ match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
+ if (!match_v || !match_c) {
+ err = -ENOMEM;
+ esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ /* Allowed vlan rule */
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);
+
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rule(vport->egress.acl,
+ MLX5_MATCH_OUTER_HEADERS,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+ 0, NULL);
+ if (IS_ERR(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ goto out;
+ }
+
+ /* Drop others rule (star rule) */
+ memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ vport->egress.drop_rule =
+ mlx5_add_flow_rule(vport->egress.acl,
+ 0,
+ match_c,
+ match_v,
+ MLX5_FLOW_CONTEXT_ACTION_DROP,
+ 0, NULL);
+ if (IS_ERR(vport->egress.drop_rule)) {
+ err = PTR_ERR(vport->egress.drop_rule);
+ pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.drop_rule = NULL;
+ }
+out:
+ kfree(match_v);
+ kfree(match_c);
+ return err;
+}
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
- unsigned long flags;
+ mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+
+ if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
+ esw_vport_ingress_config(esw, vport);
+ esw_vport_egress_config(esw, vport);
+ }
+
mlx5_modify_vport_admin_state(esw->dev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
vport_num,
@@ -725,53 +1492,29 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
/* Sync with current vport context */
vport->enabled_events = enable_events;
- esw_vport_change_handler(&vport->vport_change_handler);
-
- spin_lock_irqsave(&vport->lock, flags);
vport->enabled = true;
- spin_unlock_irqrestore(&vport->lock, flags);
- arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+ /* only PF is trusted by default */
+ vport->trusted = !vport_num;
+ esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
-}
-
-static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
-{
- struct mlx5_vport *vport = &esw->vports[vport_num];
- struct l2addr_node *node;
- struct vport_addr *addr;
- struct hlist_node *tmp;
- int hi;
-
- for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
- addr = container_of(node, struct vport_addr, node);
- addr->action = MLX5_ACTION_DEL;
- }
- esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC);
-
- for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
- addr = container_of(node, struct vport_addr, node);
- addr->action = MLX5_ACTION_DEL;
- }
- esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC);
+ mutex_unlock(&esw->state_lock);
}
static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
- unsigned long flags;
if (!vport->enabled)
return;
esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
/* Mark this vport as disabled to discard new events */
- spin_lock_irqsave(&vport->lock, flags);
vport->enabled = false;
- vport->enabled_events = 0;
- spin_unlock_irqrestore(&vport->lock, flags);
+
+ synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
mlx5_modify_vport_admin_state(esw->dev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
@@ -781,9 +1524,19 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
flush_workqueue(esw->work_queue);
/* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
- /* We don't assume VFs will cleanup after themselves */
- esw_cleanup_vport(esw, vport_num);
+ mutex_lock(&esw->state_lock);
+ /* We don't assume VFs will clean up after themselves.
+ * Calling the vport change handler while the vport is disabled will
+ * clean up the vport resources.
+ */
+ esw_vport_change_handle_locked(vport);
+ vport->enabled_events = 0;
+ if (vport_num) {
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_disable_ingress_acl(esw, vport);
+ }
esw->enabled_vports--;
+ mutex_unlock(&esw->state_lock);
}
/* Public E-Switch API */
@@ -802,6 +1555,12 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
return -ENOTSUPP;
}
+ if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
+ esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
+
+ if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
+ esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
+
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
esw_disable_vport(esw, 0);
@@ -824,6 +1583,7 @@ abort:
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
+ struct esw_mc_addr *mc_promisc;
int i;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
@@ -833,9 +1593,14 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
esw->enabled_vports);
+ mc_promisc = esw->mc_promisc;
+
for (i = 0; i < esw->total_vports; i++)
esw_disable_vport(esw, i);
+ if (mc_promisc && mc_promisc->uplink_rule)
+ mlx5_del_flow_rule(mc_promisc->uplink_rule);
+
esw_destroy_fdb_table(esw);
/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
@@ -845,7 +1610,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
- int total_vports = 1 + pci_sriov_get_totalvfs(dev->pdev);
+ int total_vports = MLX5_TOTAL_VPORTS(dev);
+ struct esw_mc_addr *mc_promisc;
struct mlx5_eswitch *esw;
int vport_num;
int err;
@@ -874,6 +1640,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
}
esw->l2_table.size = l2_table_size;
+ mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
+ if (!mc_promisc) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ esw->mc_promisc = mc_promisc;
+
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
err = -ENOMEM;
@@ -887,6 +1660,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ mutex_init(&esw->state_lock);
+
for (vport_num = 0; vport_num < total_vports; vport_num++) {
struct mlx5_vport *vport = &esw->vports[vport_num];
@@ -894,7 +1669,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
- spin_lock_init(&vport->lock);
}
esw->total_vports = total_vports;
@@ -925,6 +1699,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
+ kfree(esw->mc_promisc);
kfree(esw->vports);
kfree(esw);
}
@@ -942,10 +1717,8 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
}
vport = &esw->vports[vport_num];
- spin_lock(&vport->lock);
if (vport->enabled)
queue_work(esw->work_queue, &vport->vport_change_handler);
- spin_unlock(&vport->lock);
}
/* Vport Administration */
@@ -953,9 +1726,23 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
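+/* Derive an EUI-64 style node GUID from the MAC address: the 3-byte OUI,
+ * then 0xff 0xfe, then the 3 NIC-specific bytes. For example, a
+ * hypothetical MAC 00:11:22:33:44:55 yields GUID 00:11:22:ff:fe:33:44:55.
+ */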
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+ ((u8 *)node_guid)[7] = mac[0];
+ ((u8 *)node_guid)[6] = mac[1];
+ ((u8 *)node_guid)[5] = mac[2];
+ ((u8 *)node_guid)[4] = 0xff;
+ ((u8 *)node_guid)[3] = 0xfe;
+ ((u8 *)node_guid)[2] = mac[3];
+ ((u8 *)node_guid)[1] = mac[4];
+ ((u8 *)node_guid)[0] = mac[5];
+}
+
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
+ struct mlx5_vport *evport;
+ u64 node_guid;
int err = 0;
if (!ESW_ALLOWED(esw))
@@ -963,6 +1750,15 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ evport = &esw->vports[vport];
+
+ if (evport->spoofchk && !is_valid_ether_addr(mac)) {
+ mlx5_core_warn(esw->dev,
+ "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+ vport);
+ return -EPERM;
+ }
+
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
if (err) {
mlx5_core_warn(esw->dev,
@@ -971,6 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err;
}
+ node_guid_gen_from_mac(&node_guid, mac);
+ err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+ if (err)
+ mlx5_core_warn(esw->dev,
+ "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+ vport, err);
+
+ mutex_lock(&esw->state_lock);
+ if (evport->enabled)
+ err = esw_vport_ingress_config(esw, evport);
+ mutex_unlock(&esw->state_lock);
return err;
}
@@ -990,6 +1797,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi)
{
+ struct mlx5_vport *evport;
u16 vlan;
u8 qos;
@@ -998,6 +1806,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ evport = &esw->vports[vport];
+
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vport - 1;
@@ -1008,7 +1818,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
ivi->vlan = vlan;
ivi->qos = qos;
- ivi->spoofchk = 0;
+ ivi->spoofchk = evport->spoofchk;
return 0;
}
@@ -1016,6 +1826,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos)
{
+ struct mlx5_vport *evport;
+ int err = 0;
int set = 0;
if (!ESW_ALLOWED(esw))
@@ -1026,7 +1838,72 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
if (vlan || qos)
set = 1;
- return modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ evport = &esw->vports[vport];
+
+ err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ if (err)
+ return err;
+
+ mutex_lock(&esw->state_lock);
+ evport->vlan = vlan;
+ evport->qos = qos;
+ if (evport->enabled) {
+ err = esw_vport_ingress_config(esw, evport);
+ if (err)
+ goto out;
+ err = esw_vport_egress_config(esw, evport);
+ }
+
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ int vport, bool spoofchk)
+{
+ struct mlx5_vport *evport;
+ bool pschk;
+ int err = 0;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport))
+ return -EINVAL;
+
+ evport = &esw->vports[vport];
+
+ mutex_lock(&esw->state_lock);
+ pschk = evport->spoofchk;
+ evport->spoofchk = spoofchk;
+ if (evport->enabled)
+ err = esw_vport_ingress_config(esw, evport);
+ if (err)
+ evport->spoofchk = pschk;
+ mutex_unlock(&esw->state_lock);
+
+ return err;
+}
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+ int vport, bool setting)
+{
+ struct mlx5_vport *evport;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport))
+ return -EINVAL;
+
+ evport = &esw->vports[vport];
+
+ mutex_lock(&esw->state_lock);
+ evport->trusted = setting;
+ if (evport->enabled)
+ esw_vport_change_handle_locked(evport);
+ mutex_unlock(&esw->state_lock);
+
+ return 0;
}
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3416a428f..fd6800256 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -88,18 +88,40 @@ struct l2addr_node {
kfree(ptr); \
})
+struct vport_ingress {
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+ struct mlx5_flow_group *allow_spoofchk_only_grp;
+ struct mlx5_flow_group *allow_untagged_only_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_rule *allow_rule;
+ struct mlx5_flow_rule *drop_rule;
+};
+
+struct vport_egress {
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *allowed_vlans_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_rule *allowed_vlan;
+ struct mlx5_flow_rule *drop_rule;
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
+ struct mlx5_flow_rule *promisc_rule;
+ struct mlx5_flow_rule *allmulti_rule;
struct work_struct vport_change_handler;
- /* This spinlock protects access to vport data, between
- * "esw_vport_disable" and ongoing interrupt "mlx5_eswitch_vport_event"
- * once vport marked as disabled new interrupts are discarded.
- */
- spinlock_t lock; /* vport events sync */
+ struct vport_ingress ingress;
+ struct vport_egress egress;
+
+ u16 vlan;
+ u8 qos;
+ bool spoofchk;
+ bool trusted;
bool enabled;
u16 enabled_events;
};
@@ -113,6 +135,8 @@ struct mlx5_l2_table {
struct mlx5_eswitch_fdb {
void *fdb;
struct mlx5_flow_group *addr_grp;
+ struct mlx5_flow_group *allmulti_grp;
+ struct mlx5_flow_group *promisc_grp;
};
struct mlx5_eswitch {
@@ -124,6 +148,11 @@ struct mlx5_eswitch {
struct mlx5_vport *vports;
int total_vports;
int enabled_vports;
+ /* Synchronize between vport change events
+ * and async SRIOV admin state changes
+ */
+ struct mutex state_lock;
+ struct esw_mc_addr *mc_promisc;
};
/* E-Switch API */
@@ -138,6 +167,10 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos);
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ int vport, bool spoofchk);
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+ int vport_num, bool setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index f46f1db0f..a5bb6b695 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -50,6 +50,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
+ MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
+ }
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -57,6 +61,7 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
}
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ u16 vport,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id)
@@ -77,6 +82,10 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, level, level);
MLX5_SET(create_flow_table_in, in, log_size, log_size);
+ if (vport) {
+ MLX5_SET(create_flow_table_in, in, vport_number, vport);
+ MLX5_SET(create_flow_table_in, in, other_vport, 1);
+ }
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -101,6 +110,10 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
+ }
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
@@ -120,6 +133,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+ }
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
@@ -148,6 +165,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
MLX5_SET(create_flow_group_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(create_flow_group_in, in, other_vport, 1);
+ }
err = mlx5_cmd_exec_check_status(dev, in,
inlen, out,
@@ -174,6 +195,10 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+ if (ft->vport) {
+ MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
+ }
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
@@ -207,22 +232,29 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ if (ft->vport) {
+ MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(set_fte_in, in, other_vport, 1);
+ }
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action);
- MLX5_SET(flow_context, in_flow_context, destination_list_size,
- fte->dests_size);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ int list_size = 0;
+
list_for_each_entry(dst, &fte->node.children, node.list) {
unsigned int id;
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
MLX5_SET(dest_format_struct, in_dests, destination_type,
dst->dest_attr.type);
if (dst->dest_attr.type ==
@@ -233,8 +265,31 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
}
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ list_size++;
}
+
+ MLX5_SET(flow_context, in_flow_context, destination_list_size,
+ list_size);
}
+
+ if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ int list_size = 0;
+
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+ dst->dest_attr.counter->id);
+ in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ list_size++;
+ }
+
+ MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+ list_size);
+ }
+
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
sizeof(out));
@@ -254,18 +309,16 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned group_id,
+ int modify_mask,
struct fs_fte *fte)
{
int opmod;
- int modify_mask;
int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
flow_modify_en);
if (!atomic_mod_cap)
return -ENOTSUPP;
opmod = 1;
- modify_mask = 1 <<
- MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
}
@@ -285,8 +338,78 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
MLX5_SET(delete_fte_in, in, table_type, ft->type);
MLX5_SET(delete_fte_in, in, table_id, ft->id);
MLX5_SET(delete_fte_in, in, flow_index, index);
+ if (ft->vport) {
+ MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(delete_fte_in, in, other_vport, 1);
+ }
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
return err;
}
+
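+/* Allocate a HW flow counter; on success *id holds the counter index that
+ * flow entries reference when MLX5_FLOW_CONTEXT_ACTION_COUNT is set.
+ */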
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+
+ return 0;
+}
+
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter)];
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+ void *stats;
+ int err = 0;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(query_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+ MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
+ *packets = MLX5_GET64(traffic_counter, stats, packets);
+ *bytes = MLX5_GET64(traffic_counter, stats, octets);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 9814d4784..fc4f7b83f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -34,6 +34,7 @@
#define _MLX5_FS_CMD_
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ u16 vport,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id);
@@ -61,6 +62,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned group_id,
+ int modify_mask,
struct fs_fte *fte);
int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
@@ -69,4 +71,9 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft);
+
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 89cce97d4..e912a3d25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -40,18 +40,18 @@
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
-#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\
+#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
...) {.type = FS_TYPE_PRIO,\
.min_ft_level = min_level_val,\
- .max_ft = max_ft_val,\
+ .num_levels = num_levels_val,\
.num_leaf_prios = num_prios_val,\
.caps = caps_val,\
.children = (struct init_tree_node[]) {__VA_ARGS__},\
.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}
-#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\
- ADD_PRIO(num_prios_val, 0, max_ft_val, {},\
+#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
+ ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
__VA_ARGS__)\
#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
@@ -67,17 +67,20 @@
#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
.caps = (long[]) {__VA_ARGS__} }
-#define LEFTOVERS_MAX_FT 1
+#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
-#define BY_PASS_PRIO_MAX_FT 1
-#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
- LEFTOVERS_MAX_FT)
-#define KERNEL_MAX_FT 3
-#define KERNEL_NUM_PRIOS 2
-#define KENREL_MIN_LEVEL 2
+#define BY_PASS_PRIO_NUM_LEVELS 1
+#define BY_PASS_MIN_LEVEL (KERNEL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
+ LEFTOVERS_NUM_PRIOS)
-#define ANCHOR_MAX_FT 1
+/* Vlan, mac, ttc, aRFS */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 4
+#define KERNEL_NIC_NUM_PRIOS 1
+/* One more level for tc */
+#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
+
+#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
struct node_caps {
@@ -92,7 +95,7 @@ static struct init_tree_node {
int min_ft_level;
int num_leaf_prios;
int prio;
- int max_ft;
+ int num_levels;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
.ar_size = 4,
@@ -102,17 +105,20 @@ static struct init_tree_node {
FS_CAP(flow_table_properties_nic_receive.modify_root),
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))),
- ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))),
+ ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+ ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+ KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
FS_CAP(flow_table_properties_nic_receive.modify_root),
FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
- ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))),
+ ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))),
+ ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
}
};
@@ -222,19 +228,6 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
return NULL;
}
-static unsigned int find_next_free_level(struct fs_prio *prio)
-{
- if (!list_empty(&prio->node.children)) {
- struct mlx5_flow_table *ft;
-
- ft = list_last_entry(&prio->node.children,
- struct mlx5_flow_table,
- node.list);
- return ft->level + 1;
- }
- return prio->start_level;
-}
-
static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
{
unsigned int i;
@@ -351,6 +344,7 @@ static void del_rule(struct fs_node *node)
struct mlx5_flow_group *fg;
struct fs_fte *fte;
u32 *match_value;
+ int modify_mask;
struct mlx5_core_dev *dev = get_dev(node);
int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
int err;
@@ -374,8 +368,11 @@ static void del_rule(struct fs_node *node)
}
if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
+ modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
err = mlx5_cmd_update_fte(dev, ft,
- fg->id, fte);
+ fg->id,
+ modify_mask,
+ fte);
if (err)
pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
@@ -464,7 +461,7 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
return fg;
}
-static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
+static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
enum fs_flow_table_type table_type)
{
struct mlx5_flow_table *ft;
@@ -476,6 +473,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
ft->level = level;
ft->node.type = FS_TYPE_FLOW_TABLE;
ft->type = table_type;
+ ft->vport = vport;
ft->max_fte = max_fte;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -615,12 +613,13 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err;
}
-static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
- struct mlx5_flow_destination *dest)
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+ struct mlx5_flow_destination *dest)
{
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct fs_fte *fte;
+ int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
int err = 0;
fs_get_obj(fte, rule->node.parent);
@@ -632,7 +631,9 @@ static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
err = mlx5_cmd_update_fte(get_dev(&ft->node),
- ft, fg->id, fte);
+ ft, fg->id,
+ modify_mask,
+ fte);
unlock_ref_node(&fte->node);
return err;
@@ -693,9 +694,23 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
return err;
}
-struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int max_fte)
+static void list_add_flow_table(struct mlx5_flow_table *ft,
+ struct fs_prio *prio)
+{
+ struct list_head *prev = &prio->node.children;
+ struct mlx5_flow_table *iter;
+
+ fs_for_each_ft(iter, prio) {
+ if (iter->level > ft->level)
+ break;
+ prev = &iter->node.list;
+ }
+ list_add(&ft->node.list, prev);
+}
+
+static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ u16 vport, int prio,
+ int max_fte, u32 level)
{
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_table *ft;
@@ -716,12 +731,16 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
err = -EINVAL;
goto unlock_root;
}
- if (fs_prio->num_ft == fs_prio->max_ft) {
+ if (level >= fs_prio->num_levels) {
err = -ENOSPC;
goto unlock_root;
}
-
- ft = alloc_flow_table(find_next_free_level(fs_prio),
+ /* The requested level is an offset
+ * within this priority's level range.
+ */
+ level += fs_prio->start_level;
+ ft = alloc_flow_table(level,
+ vport,
roundup_pow_of_two(max_fte),
root->table_type);
if (!ft) {
@@ -732,7 +751,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
tree_init_node(&ft->node, 1, del_flow_table);
log_table_sz = ilog2(ft->max_fte);
next_ft = find_next_chained_ft(fs_prio);
- err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
+ err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
log_table_sz, next_ft, &ft->id);
if (err)
goto free_ft;
@@ -742,7 +761,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
goto destroy_ft;
lock_ref_node(&fs_prio->node);
tree_add_node(&ft->node, &fs_prio->node);
- list_add_tail(&ft->node.list, &fs_prio->node.children);
+ list_add_flow_table(ft, fs_prio);
fs_prio->num_ft++;
unlock_ref_node(&fs_prio->node);
mutex_unlock(&root->chain_lock);
@@ -756,17 +775,32 @@ unlock_root:
return ERR_PTR(err);
}
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, int max_fte,
+ u32 level)
+{
+ return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+}
+
+struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, int max_fte,
+ u32 level, u16 vport)
+{
+ return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+}
+
struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
- int max_num_groups)
+ int max_num_groups,
+ u32 level)
{
struct mlx5_flow_table *ft;
if (max_num_groups > num_flow_table_entries)
return ERR_PTR(-EINVAL);
- ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries);
+ ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level);
if (IS_ERR(ft))
return ft;
@@ -850,6 +884,7 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
{
struct mlx5_flow_table *ft;
struct mlx5_flow_rule *rule;
+ int modify_mask = 0;
int err;
rule = alloc_rule(dest);
@@ -865,14 +900,20 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
list_add(&rule->node.list, &fte->node.children);
else
list_add_tail(&rule->node.list, &fte->node.children);
- if (dest)
+ if (dest) {
fte->dests_size++;
+
+ modify_mask |= dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ?
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) :
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ }
+
if (fte->dests_size == 1 || !dest)
err = mlx5_cmd_create_fte(get_dev(&ft->node),
ft, fg->id, fte);
else
err = mlx5_cmd_update_fte(get_dev(&ft->node),
- ft, fg->id, fte);
+ ft, fg->id, modify_mask, fte);
if (err)
goto free_rule;
@@ -1065,6 +1106,50 @@ unlock_fg:
return rule;
}
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule)
+{
+ struct mlx5_flow_rule *dst;
+ struct fs_fte *fte;
+
+ fs_get_obj(fte, rule->node.parent);
+
+ fs_for_each_dst(dst, fte) {
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ return dst->dest_attr.counter;
+ }
+
+ return NULL;
+}
+
+static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
+{
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
+ return !counter;
+
+ if (!counter)
+ return false;
+
+ /* Hardware supports counters only for the drop action */
+ return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT);
+}
+
+static bool dest_is_valid(struct mlx5_flow_destination *dest,
+ u32 action,
+ struct mlx5_flow_table *ft)
+{
+ if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
+ return counter_is_valid(dest->counter, action);
+
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return true;
+
+ if (!dest || ((dest->type ==
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
+ (dest->ft->level <= ft->level)))
+ return false;
+ return true;
+}
+
static struct mlx5_flow_rule *
_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
@@ -1077,7 +1162,7 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
struct mlx5_flow_group *g;
struct mlx5_flow_rule *rule;
- if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest)
+ if (!dest_is_valid(dest, action, ft))
return ERR_PTR(-EINVAL);
nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
@@ -1207,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
ft->id);
return err;
}
- root->root_ft = new_root_ft;
}
+ root->root_ft = new_root_ft;
return 0;
}
@@ -1294,6 +1379,16 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
return &dev->priv.fdb_root_ns->ns;
else
return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (dev->priv.esw_egress_root_ns)
+ return &dev->priv.esw_egress_root_ns->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (dev->priv.esw_ingress_root_ns)
+ return &dev->priv.esw_ingress_root_ns->ns;
+ else
+ return NULL;
default:
return NULL;
}
@@ -1311,7 +1406,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
EXPORT_SYMBOL(mlx5_get_flow_namespace);
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
- unsigned prio, int max_ft)
+ unsigned int prio, int num_levels)
{
struct fs_prio *fs_prio;
@@ -1322,7 +1417,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
fs_prio->node.type = FS_TYPE_PRIO;
tree_init_node(&fs_prio->node, 1, NULL);
tree_add_node(&fs_prio->node, &ns->node);
- fs_prio->max_ft = max_ft;
+ fs_prio->num_levels = num_levels;
fs_prio->prio = prio;
list_add_tail(&fs_prio->node.list, &ns->node.children);
@@ -1353,14 +1448,14 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ns;
}
-static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node
- *prio_metadata)
+static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
+ struct init_tree_node *prio_metadata)
{
struct fs_prio *fs_prio;
int i;
for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
- fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft);
+ fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
if (IS_ERR(fs_prio))
return PTR_ERR(fs_prio);
}
@@ -1387,7 +1482,7 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
struct init_tree_node *init_node,
struct fs_node *fs_parent_node,
struct init_tree_node *init_parent_node,
- int index)
+ int prio)
{
int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
@@ -1405,8 +1500,8 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
fs_get_obj(fs_ns, fs_parent_node);
if (init_node->num_leaf_prios)
- return create_leaf_prios(fs_ns, init_node);
- fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft);
+ return create_leaf_prios(fs_ns, prio, init_node);
+ fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
if (IS_ERR(fs_prio))
return PTR_ERR(fs_prio);
base = &fs_prio->node;
@@ -1419,11 +1514,16 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
} else {
return -EINVAL;
}
+ prio = 0;
for (i = 0; i < init_node->ar_size; i++) {
err = init_root_tree_recursive(dev, &init_node->children[i],
- base, init_node, i);
+ base, init_node, prio);
if (err)
return err;
+ if (init_node->children[i].type == FS_TYPE_PRIO &&
+ init_node->children[i].num_leaf_prios) {
+ prio += init_node->children[i].num_leaf_prios;
+ }
}
return 0;
@@ -1479,9 +1579,9 @@ static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
struct fs_prio *prio;
fs_for_each_prio(prio, ns) {
- /* This updates prio start_level and max_ft */
+ /* This updates prio start_level and num_levels */
set_prio_attrs_in_prio(prio, acc_level);
- acc_level += prio->max_ft;
+ acc_level += prio->num_levels;
}
return acc_level;
}
@@ -1493,11 +1593,11 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
prio->start_level = acc_level;
fs_for_each_ns(ns, prio)
- /* This updates start_level and max_ft of ns's priority descendants */
+ /* This updates start_level and num_levels of ns's priority descendants */
acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
- if (!prio->max_ft)
- prio->max_ft = acc_level_ns - prio->start_level;
- WARN_ON(prio->max_ft < acc_level_ns - prio->start_level);
+ if (!prio->num_levels)
+ prio->num_levels = acc_level_ns - prio->start_level;
+ WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}
static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
@@ -1508,12 +1608,13 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
fs_for_each_prio(prio, ns) {
set_prio_attrs_in_prio(prio, start_level);
- start_level += prio->max_ft;
+ start_level += prio->num_levels;
}
}
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
+#define ANCHOR_LEVEL 0
static int create_anchor_flow_table(struct mlx5_core_dev
*dev)
{
@@ -1523,7 +1624,7 @@ static int create_anchor_flow_table(struct mlx5_core_dev
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR);
if (!ns)
return -EINVAL;
- ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE);
+ ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
if (IS_ERR(ft)) {
mlx5_core_err(dev, "Failed to create last anchor flow table");
return PTR_ERR(ft);
@@ -1666,8 +1767,14 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev)
void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return;
+
cleanup_root_ns(dev);
cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+ cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns);
+ cleanup_single_prio_root_ns(dev, dev->priv.esw_ingress_root_ns);
+ mlx5_cleanup_fc_stats(dev);
}
static int init_fdb_root_ns(struct mlx5_core_dev *dev)
@@ -1688,20 +1795,76 @@ static int init_fdb_root_ns(struct mlx5_core_dev *dev)
}
}
+static int init_egress_acl_root_ns(struct mlx5_core_dev *dev)
+{
+ struct fs_prio *prio;
+
+ dev->priv.esw_egress_root_ns = create_root_ns(dev, FS_FT_ESW_EGRESS_ACL);
+ if (!dev->priv.esw_egress_root_ns)
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&dev->priv.esw_egress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+ else
+ return 0;
+}
+
+static int init_ingress_acl_root_ns(struct mlx5_core_dev *dev)
+{
+ struct fs_prio *prio;
+
+ dev->priv.esw_ingress_root_ns = create_root_ns(dev, FS_FT_ESW_INGRESS_ACL);
+ if (!dev->priv.esw_ingress_root_ns)
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&dev->priv.esw_ingress_root_ns->ns, 0, MLX5_TOTAL_VPORTS(dev));
+ if (IS_ERR(prio))
+ return PTR_ERR(prio);
+ else
+ return 0;
+}
+
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
int err = 0;
- if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
+ err = mlx5_init_fc_stats(dev);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
err = init_root_ns(dev);
if (err)
- return err;
+ goto err;
}
+
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = init_fdb_root_ns(dev);
- if (err)
- cleanup_root_ns(dev);
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+ err = init_fdb_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+ err = init_egress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+ err = init_ingress_acl_root_ns(dev);
+ if (err)
+ goto err;
+ }
}
+ return 0;
+err:
+ mlx5_cleanup_fs(dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index f37a6248a..aa41a7314 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -45,8 +45,10 @@ enum fs_node_type {
};
enum fs_flow_table_type {
- FS_FT_NIC_RX = 0x0,
- FS_FT_FDB = 0X4,
+ FS_FT_NIC_RX = 0x0,
+ FS_FT_ESW_EGRESS_ACL = 0x2,
+ FS_FT_ESW_INGRESS_ACL = 0x3,
+ FS_FT_FDB = 0x4,
};
enum fs_fte_status {
@@ -79,6 +81,7 @@ struct mlx5_flow_rule {
struct mlx5_flow_table {
struct fs_node node;
u32 id;
+ u16 vport;
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
@@ -93,6 +96,28 @@ struct mlx5_flow_table {
struct list_head fwd_rules;
};
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ struct list_head list;
+
+ /* last{packets,bytes} members are used when calculating the delta since
+ * last reading
+ */
+ u64 lastpackets;
+ u64 lastbytes;
+
+ u16 id;
+ bool deleted;
+ bool aging;
+
+ struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+};
+
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
@@ -102,12 +127,13 @@ struct fs_fte {
u32 index;
u32 action;
enum fs_fte_status status;
+ struct mlx5_fc *counter;
};
/* Type of children is mlx5_flow_table/namespace */
struct fs_prio {
struct fs_node node;
- unsigned int max_ft;
+ unsigned int num_levels;
unsigned int start_level;
unsigned int prio;
unsigned int num_ft;
@@ -143,6 +169,9 @@ struct mlx5_flow_root_namespace {
struct mutex chain_lock;
};
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
+
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
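
The ____cacheline_aligned_in_smp annotation on the embedded cache keeps the fields refreshed every second by the query work on their own cache line, away from the dump-side last{packets,bytes} baseline. A userspace analogue of that layout, using C11 alignas as a stand-in and assuming a 64-byte cache line:

    #include <stdalign.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct fc_cache {
        uint64_t packets;
        uint64_t bytes;
        uint64_t lastuse;
    };

    struct fc {
        uint64_t lastpackets;   /* touched only by the dump side */
        uint64_t lastbytes;
        uint16_t id;
        bool deleted;
        bool aging;
        /* written by the periodic query work; isolate it to avoid
         * false sharing with the fields above */
        alignas(64) struct fc_cache cache;
    };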
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
new file mode 100644
index 000000000..164dc37fd
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "fs_cmd.h"
+
+#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
+
+/* locking scheme:
+ *
+ * It is the user's responsibility to prevent concurrent or badly ordered
+ * calls to mlx5_fc_create(), mlx5_fc_destroy() and accesses to a reference
+ * to struct mlx5_fc.
+ * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never
+ * call a dump (access to struct mlx5_fc) after a counter is destroyed.
+ *
+ * access to counter list:
+ * - create (user context)
+ * - mlx5_fc_create() only adds to an addlist to be used by
+ * mlx5_fc_stats_work(). addlist is protected by a spinlock.
+ * - spawn thread to do the actual add
+ *
+ * - destroy (user context)
+ * - mark a counter as deleted
+ * - spawn thread to do the actual del
+ *
+ * - dump (user context)
+ * user should not call dump after destroy
+ *
+ * - query (single thread workqueue context)
+ * destroy/dump - no conflict (see destroy)
+ * query/dump - packets and bytes might be inconsistent (since update is not
+ * atomic)
+ * query/create - no conflict (see create)
+ * since every create/destroy spawns the work, the thread will only query
+ * the hardware after the necessary time has elapsed.
+ */
+
+static void mlx5_fc_stats_work(struct work_struct *work)
+{
+ struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
+ priv.fc_stats.work.work);
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ unsigned long now = jiffies;
+ struct mlx5_fc *counter;
+ struct mlx5_fc *tmp;
+ int err = 0;
+
+ spin_lock(&fc_stats->addlist_lock);
+
+ list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
+
+ if (!list_empty(&fc_stats->list))
+ queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
+
+ spin_unlock(&fc_stats->addlist_lock);
+
+ list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
+ struct mlx5_fc_cache *c = &counter->cache;
+ u64 packets;
+ u64 bytes;
+
+ if (counter->deleted) {
+ list_del(&counter->list);
+
+ mlx5_cmd_fc_free(dev, counter->id);
+
+ kfree(counter);
+ continue;
+ }
+
+ if (time_before(now, fc_stats->next_query))
+ continue;
+
+ err = mlx5_cmd_fc_query(dev, counter->id, &packets, &bytes);
+ if (err) {
+ pr_err("Error querying stats for counter id %d\n",
+ counter->id);
+ continue;
+ }
+
+ if (packets == c->packets)
+ continue;
+
+ c->lastuse = jiffies;
+ c->packets = packets;
+ c->bytes = bytes;
+ }
+
+ if (time_after_eq(now, fc_stats->next_query))
+ fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
+}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc *counter;
+ int err;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_cmd_fc_alloc(dev, &counter->id);
+ if (err)
+ goto err_out;
+
+ if (aging) {
+ counter->aging = true;
+
+ spin_lock(&fc_stats->addlist_lock);
+ list_add(&counter->list, &fc_stats->addlist);
+ spin_unlock(&fc_stats->addlist_lock);
+
+ mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+ }
+
+ return counter;
+
+err_out:
+ kfree(counter);
+
+ return ERR_PTR(err);
+}
+
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ if (!counter)
+ return;
+
+ if (counter->aging) {
+ counter->deleted = true;
+ mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+ return;
+ }
+
+ mlx5_cmd_fc_free(dev, counter->id);
+ kfree(counter);
+}
+
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ INIT_LIST_HEAD(&fc_stats->list);
+ INIT_LIST_HEAD(&fc_stats->addlist);
+ spin_lock_init(&fc_stats->addlist_lock);
+
+ fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
+ if (!fc_stats->wq)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
+
+ return 0;
+}
+
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc *counter;
+ struct mlx5_fc *tmp;
+
+ cancel_delayed_work_sync(&dev->priv.fc_stats.work);
+ destroy_workqueue(dev->priv.fc_stats.wq);
+ dev->priv.fc_stats.wq = NULL;
+
+ list_splice_tail_init(&fc_stats->addlist, &fc_stats->list);
+
+ list_for_each_entry_safe(counter, tmp, &fc_stats->list, list) {
+ list_del(&counter->list);
+
+ mlx5_cmd_fc_free(dev, counter->id);
+
+ kfree(counter);
+ }
+}
+
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse)
+{
+ struct mlx5_fc_cache c;
+
+ c = counter->cache;
+
+ *bytes = c.bytes - counter->lastbytes;
+ *packets = c.packets - counter->lastpackets;
+ *lastuse = c.lastuse;
+
+ counter->lastbytes = c.bytes;
+ counter->lastpackets = c.packets;
+}
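
mlx5_fc_query_cached() never touches hardware: it copies the cache once, so packets and bytes come from a single snapshot, and reports the delta since the previous dump before advancing the baseline. The same pattern in a standalone sketch:

    #include <stdint.h>

    struct cache { uint64_t packets, bytes, lastuse; };

    struct counter {
        struct cache cache;   /* refreshed by the periodic query work */
        uint64_t lastpackets; /* baseline consumed at the last dump */
        uint64_t lastbytes;
    };

    static void query_cached(struct counter *c, uint64_t *bytes,
                             uint64_t *packets, uint64_t *lastuse)
    {
        struct cache snap = c->cache; /* one struct copy = one snapshot */

        *bytes = snap.bytes - c->lastbytes;
        *packets = snap.packets - c->lastpackets;
        *lastuse = snap.lastuse;

        c->lastbytes = snap.bytes;    /* advance the baseline */
        c->lastpackets = snap.packets;
    }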
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index f5deb642d..96a59463a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
+ mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
- return;
+ goto unlock;
mlx5_core_err(dev, "start\n");
- if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ trigger_cmd_completions(dev);
+ }
mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
mlx5_core_err(dev, "end\n");
+
+unlock:
+ mutex_unlock(&dev->intf_state_mutex);
}
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -176,11 +182,11 @@ static const char *hsynd_str(u8 synd)
case MLX5_HEALTH_SYNDR_EQ_ERR:
return "EQ error";
case MLX5_HEALTH_SYNDR_EQ_INV:
- return "Invalid EQ refrenced";
+ return "Invalid EQ referenced";
case MLX5_HEALTH_SYNDR_FFSER_ERR:
return "FFSER error";
case MLX5_HEALTH_SYNDR_HIGH_TEMP:
- return "High temprature";
+ return "High temperature";
default:
return "unrecognized error";
}
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
u32 count;
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- trigger_cmd_completions(dev);
mod_timer(&health->timer, get_next_poll_jiffies());
return;
}
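
The health fix does two things: mlx5_enter_error_state() now runs under intf_state_mutex, and trigger_cmd_completions() moves from the poll path into the transition itself, so pending commands are flushed exactly once, at the moment the state flips. A pthread sketch of that shape (names hypothetical):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool in_error;

    static void enter_error_state(void (*flush_pending)(void))
    {
        pthread_mutex_lock(&state_lock);
        if (in_error)
            goto unlock;      /* transition already done */
        in_error = true;
        flush_pending();      /* trigger_cmd_completions() analogue */
    unlock:
        pthread_mutex_unlock(&state_lock);
    }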
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 6892746fd..6695893dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -48,6 +48,9 @@
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/mlx5/mlx5_ifc.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
#include "mlx5_core.h"
#include "fs_core.h"
#ifdef CONFIG_MLX5_CORE_EN
@@ -660,11 +663,34 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
}
EXPORT_SYMBOL(mlx5_vector2eqn);
+struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_eq *eq;
+
+ spin_lock(&table->lock);
+ list_for_each_entry(eq, &table->comp_eqs_list, list)
+ if (eq->eqn == eqn) {
+ spin_unlock(&table->lock);
+ return eq;
+ }
+
+ spin_unlock(&table->lock);
+
+ return ERR_PTR(-ENOENT);
+}
+
static void free_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
struct mlx5_eq *eq, *n;
+#ifdef CONFIG_RFS_ACCEL
+ if (dev->rmap) {
+ free_irq_cpu_rmap(dev->rmap);
+ dev->rmap = NULL;
+ }
+#endif
spin_lock(&table->lock);
list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
list_del(&eq->list);
@@ -691,6 +717,11 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&table->comp_eqs_list);
ncomp_vec = table->num_comp_vectors;
nent = MLX5_COMP_EQ_SIZE;
+#ifdef CONFIG_RFS_ACCEL
+ dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
+ if (!dev->rmap)
+ return -ENOMEM;
+#endif
for (i = 0; i < ncomp_vec; i++) {
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
@@ -698,6 +729,10 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
goto clean;
}
+#ifdef CONFIG_RFS_ACCEL
+ irq_cpu_rmap_add(dev->rmap,
+ dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector);
+#endif
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(dev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
@@ -1387,46 +1422,31 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
mlx5_pci_err_detected(dev->pdev, 0);
}
-/* wait for the device to show vital signs. For now we check
- * that we can read the device ID and that the health buffer
- * shows a non zero value which is different than 0xffffffff
+/* wait for the device to show vital signs by waiting
+ * for the health counter to start counting.
*/
-static void wait_vital(struct pci_dev *pdev)
+static int wait_vital(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct mlx5_core_health *health = &dev->priv.health;
const int niter = 100;
+ u32 last_count = 0;
u32 count;
- u16 did;
int i;
- /* Wait for firmware to be ready after reset */
- msleep(1000);
- for (i = 0; i < niter; i++) {
- if (pci_read_config_word(pdev, 2, &did)) {
- dev_warn(&pdev->dev, "failed reading config word\n");
- break;
- }
- if (did == pdev->device) {
- dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
- break;
- }
- msleep(50);
- }
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
-
for (i = 0; i < niter; i++) {
count = ioread32be(health->health_counter);
if (count && count != 0xffffffff) {
- dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
- break;
+ if (last_count && last_count != count) {
+ dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ return 0;
+ }
+ last_count = count;
}
msleep(50);
}
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
+ return -ETIMEDOUT;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
@@ -1438,7 +1458,11 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);
pci_save_state(pdev);
- wait_vital(pdev);
+ err = wait_vital(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+ return;
+ }
err = mlx5_load_one(dev, priv);
if (err)
@@ -1473,8 +1497,9 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
{ PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
- { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
+ { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
+ { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
{ 0, }
};
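
The reworked wait_vital() no longer trusts a single read of the health counter: it requires two distinct valid values, i.e. proof the firmware is actually counting, and returns -ETIMEDOUT otherwise. The loop in isolation, with the register read abstracted behind a callback (an assumption of this sketch):

    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>

    static int wait_counting(uint32_t (*read_counter)(void))
    {
        const int niter = 100;
        uint32_t last = 0, cur;

        for (int i = 0; i < niter; i++) {
            cur = read_counter();
            /* 0 and 0xffffffff mean "no valid read yet" */
            if (cur && cur != 0xffffffff) {
                if (last && last != cur)
                    return 0;     /* counter advanced: device alive */
                last = cur;
            }
            usleep(50 * 1000);    /* msleep(50) analogue */
        }
        return -ETIMEDOUT;
    }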
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0b0b226c7..2f86ec6fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -42,6 +42,8 @@
#define DRIVER_VERSION "3.0-1"
#define DRIVER_RELDATE "January 2015"
+#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
+
extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
@@ -100,6 +102,8 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev);
u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
+struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+void mlx5_cq_tasklet_cb(unsigned long data);
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9eeee0545..32dea3524 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -345,7 +345,6 @@ retry:
func_id, npages, err);
goto out_4k;
}
- dev->priv.fw_pages += npages;
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
@@ -373,6 +372,33 @@ out_free:
return err;
}
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_manage_pages_inbox *in, int in_size,
+ struct mlx5_manage_pages_outbox *out, int out_size)
+{
+ struct fw_page *fwp;
+ struct rb_node *p;
+ u32 npages;
+ u32 i = 0;
+
+ if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
+ (u32 *)out, out_size);
+
+ npages = be32_to_cpu(in->num_entries);
+
+ p = rb_first(&dev->priv.page_root);
+ while (p && i < npages) {
+ fwp = rb_entry(p, struct fw_page, rb_node);
+ out->pas[i] = cpu_to_be64(fwp->addr);
+ p = rb_next(p);
+ i++;
+ }
+
+ out->num_entries = cpu_to_be32(i);
+ return 0;
+}
+
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+ err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
if (err) {
- mlx5_core_err(dev, "failed reclaiming pages\n");
- goto out_free;
- }
- dev->priv.fw_pages -= npages;
-
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
+ mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
err = -EINVAL;
goto out_free;
}
- if (nclaimed)
- *nclaimed = num_claimed;
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}
+
+ if (nclaimed)
+ *nclaimed = num_claimed;
+
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- free_4k(dev, fwp->addr);
- nclaimed = 1;
- } else {
- err = reclaim_pages(dev, fwp->func_id,
- optimal_reclaimed_pages(),
- &nclaimed);
- }
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
+
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
}
} while (p);
+ WARN(dev->priv.fw_pages,
+ "FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.fw_pages);
+ WARN(dev->priv.vfs_pages,
+ "VFs FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.vfs_pages);
+
return 0;
}
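
reclaim_pages_cmd() is the interesting part of the pagealloc change: when the device is in internal error it skips the firmware entirely and synthesizes a reply from the driver's own page tree, so teardown can still make progress. A standalone sketch of that fallback, with a plain array standing in for the rb-tree:

    #include <stddef.h>
    #include <stdint.h>

    /* Pretend the firmware answered: report up to npages of the pages
     * we already track, exactly what the caller would free anyway. */
    static size_t fake_reclaim(const uint64_t *tracked, size_t ntracked,
                               uint64_t *out, size_t npages)
    {
        size_t n = ntracked < npages ? ntracked : npages;

        for (size_t i = 0; i < n; i++)
            out[i] = tracked[i];
        return n; /* out->num_entries analogue */
    }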
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 53cc1e2c6..3e35611b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -115,6 +115,19 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
+{
+ u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
+ u32 in[MLX5_ST_SZ_DW(mlcr_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(mlcr_reg, in, local_port, 1);
+ MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MLCR, 0, 1);
+}
+
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask)
{
@@ -297,6 +310,82 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
+{
+ u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
+ u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
+ int module_mapping;
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmlp_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_PMLP, 0, 0);
+ if (err)
+ return err;
+
+ module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
+ *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+
+ return 0;
+}
+
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data)
+{
+ u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+ u32 in[MLX5_ST_SZ_DW(mcia_reg)];
+ int module_num;
+ u16 i2c_addr;
+ int status;
+ int err;
+ void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+ err = mlx5_query_module_num(dev, &module_num);
+ if (err)
+ return err;
+
+ memset(in, 0, sizeof(in));
+ size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
+
+ if (offset < MLX5_EEPROM_PAGE_LENGTH &&
+ offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ /* Cross-page read: clamp to the end of the low page (offset 256) */
+ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+
+ i2c_addr = MLX5_I2C_ADDR_LOW;
+ if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
+ i2c_addr = MLX5_I2C_ADDR_HIGH;
+ offset -= MLX5_EEPROM_PAGE_LENGTH;
+ }
+
+ MLX5_SET(mcia_reg, in, l, 0);
+ MLX5_SET(mcia_reg, in, module, module_num);
+ MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
+ MLX5_SET(mcia_reg, in, page_number, 0);
+ MLX5_SET(mcia_reg, in, device_address, offset);
+ MLX5_SET(mcia_reg, in, size, size);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCIA, 0, 0);
+ if (err)
+ return err;
+
+ status = MLX5_GET(mcia_reg, out, status);
+ if (status) {
+ mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ memcpy(data, ptr, size);
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
+
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
{
@@ -607,3 +696,52 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
+
+static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
+ int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ outlen, MLX5_REG_PCMR, 0, 0);
+}
+
+static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+
+ return mlx5_core_access_reg(mdev, in, inlen, out,
+ sizeof(out), MLX5_REG_PCMR, 0, 1);
+}
+
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+ MLX5_SET(pcmr_reg, in, fcs_chk, enable);
+
+ return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled)
+{
+ u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+ /* Default values for FW that does not support MLX5_REG_PCMR */
+ *supported = false;
+ *enabled = true;
+
+ if (!MLX5_CAP_GEN(mdev, ports_check))
+ return;
+
+ if (mlx5_query_ports_check(mdev, out, sizeof(out)))
+ return;
+
+ *supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap));
+ *enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
+}
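
mlx5_query_module_eeprom() has to respect the SFF EEPROM layout: the low and high pages sit behind different I2C addresses, so a single read may not straddle the page boundary. A sketch of just that clamping arithmetic (the 256-byte page length mirrors MLX5_EEPROM_PAGE_LENGTH, assumed here):

    #include <stdbool.h>
    #include <stdint.h>

    #define EEPROM_PAGE_LEN 256 /* MLX5_EEPROM_PAGE_LENGTH stand-in */

    /* Trim an (offset, size) read so it stays within one page, and
     * report whether the high-page I2C address must be used. */
    static void clamp_eeprom_read(uint16_t *offset, uint16_t *size,
                                  bool *use_high_addr)
    {
        if (*offset < EEPROM_PAGE_LEN && *offset + *size > EEPROM_PAGE_LEN)
            *size = EEPROM_PAGE_LEN - *offset;

        *use_high_addr = false;
        if (*offset >= EEPROM_PAGE_LEN) {
            *use_high_addr = true; /* MLX5_I2C_ADDR_HIGH analogue */
            *offset -= EEPROM_PAGE_LEN;
        }
    }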
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index def289375..b82d65802 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
else
- *xrcdn = be32_to_cpu(out.xrcdn);
+ *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
return err;
}
@@ -538,3 +538,71 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
+
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *counter_id = MLX5_GET(alloc_q_counter_out, out,
+ counter_set_id);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
+
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
+
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+ int reset, void *out, int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, clear, reset);
+ MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
+
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+ u32 *out_of_buffer)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
+ void *out;
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
+ if (!err)
+ *out_of_buffer = MLX5_GET(query_q_counter_out, out,
+ out_of_buffer);
+
+ kfree(out);
+ return err;
+}
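
The one-line qp.c fix matters because the XRCD number is a 24-bit field sharing its dword with reserved bits; without the mask, garbage in the top byte would leak into the returned xrcdn. In isolation:

    #include <stdint.h>

    /* Keep only the low 24 bits of the (already byte-swapped) dword,
     * as the fixed mlx5_core_xrcd_alloc() now does. */
    static uint32_t xrcdn_field(uint32_t host_order_dword)
    {
        return host_order_dword & 0xffffff;
    }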
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 7b2438679..d6a3f412b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -140,7 +140,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err;
- mlx5_core_dbg(dev, "requsted num_vfs %d\n", num_vfs);
+ mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
if (!mlx5_core_is_pf(dev))
return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index b69dadcfb..91846dfcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -508,6 +508,41 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_context;
+ void *in;
+ int err;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EACCES;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.node_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index f2fd1ef16..e25a73ed2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -72,8 +72,8 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
@@ -105,6 +105,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
struct mlx5e_vxlan *vxlan;
int err;
+ if (mlx5e_vxlan_lookup_port(priv, port))
+ goto free_work;
+
if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;
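
The new lookup in mlx5e_vxlan_add_port() makes the add work idempotent: a repeated notification for a UDP port that is already offloaded becomes a no-op instead of a second add-port command. A minimal userspace sketch of the guard (the flat array is a stand-in for the driver's port table):

    #include <stdbool.h>
    #include <stdint.h>

    struct port_set { uint16_t ports[64]; int n; };

    static bool port_set_contains(const struct port_set *s, uint16_t port)
    {
        for (int i = 0; i < s->n; i++)
            if (s->ports[i] == port)
                return true;
        return false;
    }

    static void add_port_once(struct port_set *s, uint16_t port)
    {
        if (port_set_contains(s, port))
            return;                  /* already offloaded: no-op */
        if (s->n < 64)
            s->ports[s->n++] = port; /* add-port command analogue */
    }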
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 217ac530a..5def12c04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -48,18 +48,12 @@ struct mlx5e_vxlan_work {
static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
{
- return IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) &&
- (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
+ return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
mlx5_core_is_pf(mdev));
}
-#ifdef CONFIG_MLX5_CORE_EN_VXLAN
void mlx5e_vxlan_init(struct mlx5e_priv *priv);
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
-#else
-static inline void mlx5e_vxlan_init(struct mlx5e_priv *priv) {}
-static inline void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) {}
-#endif
void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
u16 port, int add);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index ce21ee5b2..821a087c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -75,14 +75,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
@@ -111,14 +111,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
@@ -148,13 +148,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 2ad7f6785..5989f7cb5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -50,3 +50,11 @@ config MLXSW_SPECTRUM
To compile this driver as a module, choose M here: the
module will be called mlxsw_spectrum.
+
+config MLXSW_SPECTRUM_DCB
+ bool "Data Center Bridging (DCB) support"
+ depends on MLXSW_SPECTRUM && DCB
+ default y
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 584cac444..9b5ebf84c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -8,3 +8,4 @@ mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_switchdev.o
+mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f69f62805..b0a0b01bb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -44,7 +44,7 @@
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
-#include <linux/wait.h>
+#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
@@ -55,6 +55,7 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
@@ -73,6 +74,8 @@ static const char mlxsw_core_driver_name[] = "mlxsw_core";
static struct dentry *mlxsw_core_dbg_root;
+static struct workqueue_struct *mlxsw_wq;
+
struct mlxsw_core_pcpu_stats {
u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
@@ -93,11 +96,9 @@ struct mlxsw_core {
struct list_head rx_listener_list;
struct list_head event_listener_list;
struct {
- struct sk_buff *resp_skb;
- u64 tid;
- wait_queue_head_t wait;
- bool trans_active;
- struct mutex lock; /* One EMAD transaction at a time. */
+ atomic64_t tid;
+ struct list_head trans_list;
+ spinlock_t trans_list_lock; /* protects trans_list writes */
bool use_emad;
} emad;
struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
@@ -114,6 +115,12 @@ struct mlxsw_core {
/* driver_priv has to be always the last item */
};
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver_priv;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_priv);
+
struct mlxsw_rx_listener_item {
struct list_head list;
struct mlxsw_rx_listener rxl;
@@ -284,7 +291,7 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
const struct mlxsw_reg_info *reg,
enum mlxsw_core_reg_access_type type,
- struct mlxsw_core *mlxsw_core)
+ u64 tid)
{
mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
@@ -300,7 +307,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
MLXSW_EMAD_OP_TLV_METHOD_WRITE);
mlxsw_emad_op_tlv_class_set(op_tlv,
MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
- mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+ mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
@@ -322,7 +329,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type,
- struct mlxsw_core *mlxsw_core)
+ u64 tid)
{
char *buf;
@@ -333,7 +340,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
mlxsw_emad_pack_reg_tlv(buf, reg, payload);
buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
- mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+ mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
mlxsw_emad_construct_eth_hdr(skb);
}
@@ -370,58 +377,16 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
-#define MLXSW_EMAD_TIMEOUT_MS 200
-
-static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- int err;
- int ret;
-
- mlxsw_core->emad.trans_active = true;
-
- err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
- if (err) {
- dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
- mlxsw_core->emad.tid);
- dev_kfree_skb(skb);
- goto trans_inactive_out;
- }
-
- ret = wait_event_timeout(mlxsw_core->emad.wait,
- !(mlxsw_core->emad.trans_active),
- msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
- if (!ret) {
- dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
- mlxsw_core->emad.tid);
- err = -EIO;
- goto trans_inactive_out;
- }
-
- return 0;
-
-trans_inactive_out:
- mlxsw_core->emad.trans_active = false;
- return err;
-}
-
-static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
- char *op_tlv)
+static int mlxsw_emad_process_status(char *op_tlv,
+ enum mlxsw_emad_op_tlv_status *p_status)
{
- enum mlxsw_emad_op_tlv_status status;
- u64 tid;
+ *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
- status = mlxsw_emad_op_tlv_status_get(op_tlv);
- tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
-
- switch (status) {
+ switch (*p_status) {
case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
return 0;
case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
- dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
- tid, status, mlxsw_emad_op_tlv_status_str(status));
return -EAGAIN;
case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
@@ -432,70 +397,150 @@ static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
default:
- dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
- tid, status, mlxsw_emad_op_tlv_status_str(status));
return -EIO;
}
}
-static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb)
+static int
+mlxsw_emad_process_status_skb(struct sk_buff *skb,
+ enum mlxsw_emad_op_tlv_status *p_status)
+{
+ return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
+}
+
+struct mlxsw_reg_trans {
+ struct list_head list;
+ struct list_head bulk_list;
+ struct mlxsw_core *core;
+ struct sk_buff *tx_skb;
+ struct mlxsw_tx_info tx_info;
+ struct delayed_work timeout_dw;
+ unsigned int retries;
+ u64 tid;
+ struct completion completion;
+ atomic_t active;
+ mlxsw_reg_trans_cb_t *cb;
+ unsigned long cb_priv;
+ const struct mlxsw_reg_info *reg;
+ enum mlxsw_core_reg_access_type type;
+ int err;
+ enum mlxsw_emad_op_tlv_status emad_status;
+ struct rcu_head rcu;
+};
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
- return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+ unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+
+ mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
}
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ struct mlxsw_reg_trans *trans)
{
- struct sk_buff *trans_skb;
- int n_retry;
+ struct sk_buff *skb;
int err;
- n_retry = 0;
-retry:
- /* We copy the EMAD to a new skb, since we might need
- * to retransmit it in case of failure.
- */
- trans_skb = skb_copy(skb, GFP_KERNEL);
- if (!trans_skb) {
- err = -ENOMEM;
- goto out;
+ skb = skb_copy(trans->tx_skb, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ atomic_set(&trans->active, 1);
+ err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+ if (err) {
+ dev_kfree_skb(skb);
+ return err;
}
+ mlxsw_emad_trans_timeout_schedule(trans);
+ return 0;
+}
- err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
- if (!err) {
- struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
+{
+ struct mlxsw_core *mlxsw_core = trans->core;
+
+ dev_kfree_skb(trans->tx_skb);
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del_rcu(&trans->list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ trans->err = err;
+ complete(&trans->completion);
+}
- err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
- if (err)
- dev_kfree_skb(resp_skb);
- if (!err || err != -EAGAIN)
- goto out;
+static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_reg_trans *trans)
+{
+ int err;
+
+ if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
+ trans->retries++;
+ err = mlxsw_emad_transmit(trans->core, trans);
+ if (err == 0)
+ return;
+ } else {
+ err = -EIO;
}
- if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
- goto retry;
+ mlxsw_emad_trans_finish(trans, err);
+}
-out:
- dev_kfree_skb(skb);
- mlxsw_core->emad.tid++;
- return err;
+static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
+{
+ struct mlxsw_reg_trans *trans = container_of(work,
+ struct mlxsw_reg_trans,
+ timeout_dw.work);
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
+
+ mlxsw_emad_transmit_retry(trans->core, trans);
+}
+
+static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_reg_trans *trans,
+ struct sk_buff *skb)
+{
+ int err;
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
+
+ err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
+ if (err == -EAGAIN) {
+ mlxsw_emad_transmit_retry(mlxsw_core, trans);
+ } else {
+ if (err == 0) {
+ char *op_tlv = mlxsw_emad_op_tlv(skb);
+
+ if (trans->cb)
+ trans->cb(mlxsw_core,
+ mlxsw_emad_reg_payload(op_tlv),
+ trans->reg->len, trans->cb_priv);
+ }
+ mlxsw_emad_trans_finish(trans, err);
+ }
}
+/* Called with the RCU read lock held. */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
void *priv)
{
struct mlxsw_core *mlxsw_core = priv;
+ struct mlxsw_reg_trans *trans;
- if (mlxsw_emad_is_resp(skb) &&
- mlxsw_core->emad.trans_active &&
- mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
- mlxsw_core->emad.resp_skb = skb;
- mlxsw_core->emad.trans_active = false;
- wake_up(&mlxsw_core->emad.wait);
- } else {
- dev_kfree_skb(skb);
+ if (!mlxsw_emad_is_resp(skb))
+ goto free_skb;
+
+ list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
+ if (mlxsw_emad_get_tid(skb) == trans->tid) {
+ mlxsw_emad_process_response(mlxsw_core, trans, skb);
+ break;
+ }
}
+
+free_skb:
+ dev_kfree_skb(skb);
}
static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
@@ -522,18 +567,19 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
+ u64 tid;
int err;
/* Set the upper 32 bits of the transaction ID field to a random
* number. This allows us to discard EMADs addressed to other
* devices.
*/
- get_random_bytes(&mlxsw_core->emad.tid, 4);
- mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+ get_random_bytes(&tid, 4);
+ tid <<= 32;
+ atomic64_set(&mlxsw_core->emad.tid, tid);
- init_waitqueue_head(&mlxsw_core->emad.wait);
- mlxsw_core->emad.trans_active = false;
- mutex_init(&mlxsw_core->emad.lock);
+ INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
+ spin_lock_init(&mlxsw_core->emad.trans_list_lock);
err = mlxsw_core_rx_listener_register(mlxsw_core,
&mlxsw_emad_rx_listener,
@@ -591,6 +637,59 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
return skb;
}
+static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_reg_trans *trans,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb,
+ unsigned long cb_priv, u64 tid)
+{
+ struct sk_buff *skb;
+ int err;
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ trans->tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+ if (!skb)
+ return -ENOMEM;
+
+ list_add_tail(&trans->bulk_list, bulk_list);
+ trans->core = mlxsw_core;
+ trans->tx_skb = skb;
+ trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
+ trans->tx_info.is_emad = true;
+ INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
+ trans->tid = tid;
+ init_completion(&trans->completion);
+ trans->cb = cb;
+ trans->cb_priv = cb_priv;
+ trans->reg = reg;
+ trans->type = type;
+
+ mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
+ mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
+
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ err = mlxsw_emad_transmit(mlxsw_core, trans);
+ if (err)
+ goto err_out;
+ return 0;
+
+err_out:
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del_rcu(&trans->list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del(&trans->bulk_list);
+ dev_kfree_skb(trans->tx_skb);
+ return err;
+}
+
/*****************
* Core functions
*****************/
@@ -680,24 +779,6 @@ static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
.llseek = seq_lseek
};
-static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
- const char *buf, size_t size)
-{
- __be32 *m = (__be32 *) buf;
- int i;
- int count = size / sizeof(__be32);
-
- for (i = count - 1; i >= 0; i--)
- if (m[i])
- break;
- i++;
- count = i ? i : 1;
- for (i = 0; i < count; i += 4)
- dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
- i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
- be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
-}
-
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
spin_lock(&mlxsw_core_driver_list_lock);
@@ -795,8 +876,7 @@ static int mlxsw_devlink_port_split(struct devlink *devlink,
return -EINVAL;
if (!mlxsw_core->driver->port_split)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_split(mlxsw_core->driver_priv,
- port_index, count);
+ return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
@@ -808,13 +888,171 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
return -EINVAL;
if (!mlxsw_core->driver->port_unsplit)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv,
- port_index);
+ return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
+}
+
+static int
+mlxsw_devlink_sb_pool_get(struct devlink *devlink,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_pool_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
+ pool_index, pool_info);
+}
+
+static int
+mlxsw_devlink_sb_pool_set(struct devlink *devlink,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_pool_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
+ pool_index, size, threshold_type);
+}
+
+static void *__dl_port(struct devlink_port *devlink_port)
+{
+ return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
+}
+
+static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_port_pool_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
+ pool_index, p_threshold);
+}
+
+static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_port_pool_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
+ pool_index, threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_tc_pool_bind_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
+ tc_index, pool_type,
+ p_pool_index, p_threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_tc_pool_bind_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
+ tc_index, pool_type,
+ pool_index, threshold);
+}
+
+static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
+ unsigned int sb_index)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_occ_snapshot)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
+}
+
+static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
+ unsigned int sb_index)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_occ_max_clear)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
+}
+
+static int
+mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_occ_port_pool_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
+ pool_index, p_cur, p_max);
+}
+
+static int
+mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_occ_tc_port_bind_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
+ sb_index, tc_index,
+ pool_type, p_cur, p_max);
}
static const struct devlink_ops mlxsw_devlink_ops = {
- .port_split = mlxsw_devlink_port_split,
- .port_unsplit = mlxsw_devlink_port_unsplit,
+ .port_split = mlxsw_devlink_port_split,
+ .port_unsplit = mlxsw_devlink_port_unsplit,
+ .sb_pool_get = mlxsw_devlink_sb_pool_get,
+ .sb_pool_set = mlxsw_devlink_sb_pool_set,
+ .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
};
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
@@ -880,8 +1118,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_devlink_register;
- err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
- mlxsw_bus_info);
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
if (err)
goto err_driver_init;
@@ -892,7 +1129,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
return 0;
err_debugfs_init:
- mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
devlink_unregister(devlink);
err_devlink_register:
@@ -918,7 +1155,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
struct devlink *devlink = priv_to_devlink(mlxsw_core);
mlxsw_core_debugfs_fini(mlxsw_core);
- mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ mlxsw_core->driver->fini(mlxsw_core);
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
@@ -929,26 +1166,17 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
-static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
-{
- return container_of(driver_priv, struct mlxsw_core, driver_priv);
-}
-
-bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info)
{
- struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
-
return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
-int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
- struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
-
return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
tx_info);
}
@@ -1108,56 +1336,112 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
+{
+ return atomic64_inc_return(&mlxsw_core->emad.tid);
+}
+
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
- enum mlxsw_core_reg_access_type type)
+ enum mlxsw_core_reg_access_type type,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb,
+ unsigned long cb_priv)
{
+ u64 tid = mlxsw_core_tid_get(mlxsw_core);
+ struct mlxsw_reg_trans *trans;
int err;
- char *op_tlv;
- struct sk_buff *skb;
- struct mlxsw_tx_info tx_info = {
- .local_port = MLXSW_PORT_CPU_PORT,
- .is_emad = true,
- };
- skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
- if (!skb)
+ trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+ if (!trans)
return -ENOMEM;
- mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
- mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+ err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
+ bulk_list, cb, cb_priv, tid);
+ if (err) {
+ kfree(trans);
+ return err;
+ }
+ return 0;
+}
- dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
- mlxsw_core->emad.tid);
- mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+ return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+ bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_query);
- err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
- if (!err) {
- op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
- memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
- reg->len);
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+ return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+ bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_write);
- dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
- mlxsw_core->emad.tid - 1);
- mlxsw_core_buf_dump_dbg(mlxsw_core,
- mlxsw_core->emad.resp_skb->data,
- mlxsw_core->emad.resp_skb->len);
+static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
+{
+ struct mlxsw_core *mlxsw_core = trans->core;
+ int err;
- dev_kfree_skb(mlxsw_core->emad.resp_skb);
- }
+ wait_for_completion(&trans->completion);
+ cancel_delayed_work_sync(&trans->timeout_dw);
+ err = trans->err;
+ if (trans->retries)
+ dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
+ trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
+ trans->tid, trans->reg->id,
+ mlxsw_reg_id_str(trans->reg->id),
+ mlxsw_core_reg_access_type_str(trans->type),
+ trans->emad_status,
+ mlxsw_emad_op_tlv_status_str(trans->emad_status));
+
+ list_del(&trans->bulk_list);
+ kfree_rcu(trans, rcu);
return err;
}
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
+{
+ struct mlxsw_reg_trans *trans;
+ struct mlxsw_reg_trans *tmp;
+ int sum_err = 0;
+ int err;
+
+ list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
+ err = mlxsw_reg_trans_wait(trans);
+ if (err && sum_err == 0)
+ sum_err = err; /* first error to be returned */
+ }
+ return sum_err;
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
+
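+/* Minimal usage sketch of the transaction API above (hypothetical caller;
+ * mlxsw_core, local_port and the payload buffers are assumed to exist).
+ * Responses are delivered to the callback; a NULL callback discards the
+ * response payload.
+ *
+ *	LIST_HEAD(bulk_list);
+ *
+ *	mlxsw_reg_pfcc_pack(pfcc_pl, local_port);
+ *	err = mlxsw_reg_trans_query(mlxsw_core, &mlxsw_reg_pfcc, pfcc_pl,
+ *				    &bulk_list, NULL, 0);
+ *	if (err)
+ *		return err;
+ *	mlxsw_reg_pptb_pack(pptb_pl, local_port);
+ *	err = mlxsw_reg_trans_write(mlxsw_core, &mlxsw_reg_pptb, pptb_pl,
+ *				    &bulk_list, NULL, 0);
+ *	if (err)
+ *		return err;
+ *	err = mlxsw_reg_trans_bulk_wait(&bulk_list);
+ */
+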
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type)
{
+ enum mlxsw_emad_op_tlv_status status;
int err, n_retry;
char *in_mbox, *out_mbox, *tmp;
+ dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
in_mbox = mlxsw_cmd_mbox_alloc();
if (!in_mbox)
return -ENOMEM;
@@ -1168,7 +1452,8 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
goto free_in_mbox;
}
- mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+ mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
+ mlxsw_core_tid_get(mlxsw_core));
tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
@@ -1176,60 +1461,61 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
retry:
err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
if (!err) {
- err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
- if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
- goto retry;
+ err = mlxsw_emad_process_status(out_mbox, &status);
+ if (err) {
+ if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+ dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
+ status, mlxsw_emad_op_tlv_status_str(status));
+ }
}
if (!err)
memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
reg->len);
- mlxsw_core->emad.tid++;
mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
mlxsw_cmd_mbox_free(in_mbox);
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
return err;
}
+static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
+ char *payload, size_t payload_len,
+ unsigned long cb_priv)
+{
+ char *orig_payload = (char *) cb_priv;
+
+ memcpy(orig_payload, payload, payload_len);
+}
+
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type)
{
- u64 cur_tid;
+ LIST_HEAD(bulk_list);
int err;
- if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
- dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
- reg->id, mlxsw_reg_id_str(reg->id),
- mlxsw_core_reg_access_type_str(type));
- return -EINTR;
- }
-
- cur_tid = mlxsw_core->emad.tid;
- dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
- cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
- mlxsw_core_reg_access_type_str(type));
-
/* During initialization EMAD interface is not available to us,
* so we default to command interface. We switch to EMAD interface
* after setting the appropriate traps.
*/
if (!mlxsw_core->emad.use_emad)
- err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
- payload, type);
- else
- err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
payload, type);
+ err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ payload, type, &bulk_list,
+ mlxsw_core_reg_access_cb,
+ (unsigned long) payload);
if (err)
- dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
- cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
- mlxsw_core_reg_access_type_str(type));
-
- mutex_unlock(&mlxsw_core->emad.lock);
- return err;
+ return err;
+ return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
@@ -1358,6 +1644,46 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
+ struct net_device *dev, bool split, u32 split_group)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ if (split)
+ devlink_port_split_set(devlink_port, split_group);
+ devlink_port_type_eth_set(devlink_port, dev);
+ return devlink_port_register(devlink, devlink_port, local_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_init);
+
+void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
+{
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ devlink_port_unregister(devlink_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_fini);
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+ const char *buf, size_t size)
+{
+ __be32 *m = (__be32 *) buf;
+ int i;
+ int count = size / sizeof(__be32);
+
+ for (i = count - 1; i >= 0; i--)
+ if (m[i])
+ break;
+ i++;
+ count = i ? i : 1;
+ for (i = 0; i < count; i += 4)
+ dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+ i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+ be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
u32 in_mod, bool out_mbox_direct,
char *in_mbox, size_t in_mbox_size,
@@ -1400,17 +1726,35 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
+{
+ return queue_delayed_work(mlxsw_wq, dwork, delay);
+}
+EXPORT_SYMBOL(mlxsw_core_schedule_dw);
+
static int __init mlxsw_core_module_init(void)
{
- mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
- if (!mlxsw_core_dbg_root)
+ int err;
+
+ mlxsw_wq = create_workqueue(mlxsw_core_driver_name);
+ if (!mlxsw_wq)
return -ENOMEM;
+ mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+ if (!mlxsw_core_dbg_root) {
+ err = -ENOMEM;
+ goto err_debugfs_create_dir;
+ }
return 0;
+
+err_debugfs_create_dir:
+ destroy_workqueue(mlxsw_wq);
+ return err;
}
static void __exit mlxsw_core_module_exit(void)
{
debugfs_remove_recursive(mlxsw_core_dbg_root);
+ destroy_workqueue(mlxsw_wq);
}
module_init(mlxsw_core_module_init);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c73d1c079..436bc49df 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -43,6 +43,8 @@
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/devlink.h>
#include "trap.h"
#include "reg.h"
@@ -61,6 +63,8 @@ struct mlxsw_driver;
struct mlxsw_bus;
struct mlxsw_bus_info;
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
+
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
@@ -74,10 +78,9 @@ struct mlxsw_tx_info {
bool is_emad;
};
-bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
-
-int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
struct mlxsw_rx_listener {
@@ -106,6 +109,19 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_event_listener *el,
void *priv);
+typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
+ size_t payload_len, unsigned long cb_priv);
+
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list);
+
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg, char *payload);
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
@@ -131,6 +147,26 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 local_port);
+struct mlxsw_core_port {
+ struct devlink_port devlink_port;
+};
+
+static inline void *
+mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
+{
+ /* mlxsw_core_port is guaranteed to always be the first field in the
+ * driver's port structure.
+ */
+ return mlxsw_core_port;
+}
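+
+/* For example, a driver port structure would embed it first (hypothetical):
+ *
+ *	struct example_driver_port {
+ *		struct mlxsw_core_port core_port;	(must be first)
+ *		struct net_device *dev;
+ *	};
+ */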
+
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
+ struct net_device *dev, bool split, u32 split_group);
+void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port);
+
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
+
#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
struct mlxsw_swid_config {
@@ -183,11 +219,43 @@ struct mlxsw_driver {
const char *kind;
struct module *owner;
size_t priv_size;
- int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+ int (*init)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info);
- void (*fini)(void *driver_priv);
- int (*port_split)(void *driver_priv, u8 local_port, unsigned int count);
- int (*port_unsplit)(void *driver_priv, u8 local_port);
+ void (*fini)(struct mlxsw_core *mlxsw_core);
+ int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ unsigned int count);
+ int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port);
+ int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+ int (*sb_pool_set)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type);
+ int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+ int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold);
+ int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+ int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold);
+ int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+ int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+ int (*sb_occ_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+ int (*sb_occ_tc_port_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
u8 txhdr_len;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index ffe4c0305..57d48da70 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1805,6 +1805,184 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
}
}
+/* QTCT - QoS Switch Traffic Class Table
+ * -------------------------------------
+ * Configures the mapping between the packet switch priority and the
+ * traffic class on the transmit port.
+ */
+#define MLXSW_REG_QTCT_ID 0x400A
+#define MLXSW_REG_QTCT_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_qtct = {
+ .id = MLXSW_REG_QTCT_ID,
+ .len = MLXSW_REG_QTCT_LEN,
+};
+
+/* reg_qtct_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
+
+/* reg_qtct_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtct, sub_port, 0x00, 8, 8);
+
+/* reg_qtct_switch_prio
+ * Switch priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
+
+/* reg_qtct_tclass
+ * Traffic class.
+ * Default values:
+ * switch_prio 0 : tclass 1
+ * switch_prio 1 : tclass 0
+ * switch_prio i : tclass i, for i > 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
+
+static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
+ u8 switch_prio, u8 tclass)
+{
+ MLXSW_REG_ZERO(qtct, payload);
+ mlxsw_reg_qtct_local_port_set(payload, local_port);
+ mlxsw_reg_qtct_switch_prio_set(payload, switch_prio);
+ mlxsw_reg_qtct_tclass_set(payload, tclass);
+}
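+
+/* Usage sketch (hypothetical caller; mlxsw_core and local_port are assumed):
+ * map switch priority 3 to traffic class 3 on a port.
+ *
+ *	char qtct_pl[MLXSW_REG_QTCT_LEN];
+ *
+ *	mlxsw_reg_qtct_pack(qtct_pl, local_port, 3, 3);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qtct), qtct_pl);
+ */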
+
+/* QEEC - QoS ETS Element Configuration Register
+ * ---------------------------------------------
+ * Configures the ETS elements.
+ */
+#define MLXSW_REG_QEEC_ID 0x400D
+#define MLXSW_REG_QEEC_LEN 0x1C
+
+static const struct mlxsw_reg_info mlxsw_reg_qeec = {
+ .id = MLXSW_REG_QEEC_ID,
+ .len = MLXSW_REG_QEEC_LEN,
+};
+
+/* reg_qeec_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported.
+ */
+MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_qeec_hr {
+ MLXSW_REG_QEEC_HIERARCY_PORT,
+ MLXSW_REG_QEEC_HIERARCY_GROUP,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+};
+
+/* reg_qeec_element_hierarchy
+ * 0 - Port
+ * 1 - Group
+ * 2 - Subgroup
+ * 3 - Traffic Class
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qeec, element_hierarchy, 0x04, 16, 4);
+
+/* reg_qeec_element_index
+ * The index of the element in the hierarchy.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
+
+/* reg_qeec_next_element_index
+ * The index of the next (lower) element in the hierarchy.
+ * Access: RW
+ *
+ * Note: Reserved for element_hierarchy 0.
+ */
+MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
+
+enum {
+ MLXSW_REG_QEEC_BYTES_MODE,
+ MLXSW_REG_QEEC_PACKETS_MODE,
+};
+
+/* reg_qeec_pb
+ * Packets or bytes mode.
+ * 0 - Bytes mode
+ * 1 - Packets mode
+ * Access: RW
+ *
+ * Note: Used for max shaper configuration. For Spectrum, packets mode
+ * is supported only for traffic classes of the CPU port.
+ */
+MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
+
+/* reg_qeec_mase
+ * Max shaper configuration enable. Enables configuration of the max
+ * shaper on this ETS element.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
+
+/* A large max rate will disable the max shaper. */
+#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */
+
+/* reg_qeec_max_shaper_rate
+ * Max shaper information rate.
+ * For CPU port, can only be configured for port hierarchy.
+ * When in bytes mode, value is specified in units of 1000bps.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28);
+
+/* reg_qeec_de
+ * DWRR configuration enable. Enables configuration of the dwrr and
+ * dwrr_weight.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, de, 0x18, 31, 1);
+
+/* reg_qeec_dwrr
+ * Transmission selection algorithm to use on the link going down from
+ * the ETS element.
+ * 0 - Strict priority
+ * 1 - DWRR
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
+
+/* reg_qeec_dwrr_weight
+ * DWRR weight on the link going down from the ETS element. The
+ * percentage of bandwidth guaranteed to an ETS element within
+ * its hierarchy. The sum of all weights across all ETS elements
+ * within one hierarchy should be equal to 100. Reserved when
+ * transmission selection algorithm is strict priority.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
+
+static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index)
+{
+ MLXSW_REG_ZERO(qeec, payload);
+ mlxsw_reg_qeec_local_port_set(payload, local_port);
+ mlxsw_reg_qeec_element_hierarchy_set(payload, hr);
+ mlxsw_reg_qeec_element_index_set(payload, index);
+ mlxsw_reg_qeec_next_element_index_set(payload, next_index);
+}
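+
+/* Usage sketch (hypothetical caller; mlxsw_core, local_port and tclass are
+ * assumed): give traffic class tclass a DWRR weight of 25.
+ *
+ *	char qeec_pl[MLXSW_REG_QEEC_LEN];
+ *
+ *	mlxsw_reg_qeec_pack(qeec_pl, local_port, MLXSW_REG_QEEC_HIERARCY_TC,
+ *			    tclass, tclass);
+ *	mlxsw_reg_qeec_de_set(qeec_pl, 1);
+ *	mlxsw_reg_qeec_dwrr_set(qeec_pl, 1);
+ *	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, 25);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qeec), qeec_pl);
+ */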
+
/* PMLP - Ports Module to Local Port Register
* ------------------------------------------
* Configures the assignment of modules to local ports.
@@ -2141,6 +2319,145 @@ static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
mlxsw_reg_paos_e_set(payload, 1);
}
+/* PFCC - Ports Flow Control Configuration Register
+ * ------------------------------------------------
+ * Configures and retrieves the per port flow control configuration.
+ */
+#define MLXSW_REG_PFCC_ID 0x5007
+#define MLXSW_REG_PFCC_LEN 0x20
+
+static const struct mlxsw_reg_info mlxsw_reg_pfcc = {
+ .id = MLXSW_REG_PFCC_ID,
+ .len = MLXSW_REG_PFCC_LEN,
+};
+
+/* reg_pfcc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8);
+
+/* reg_pfcc_pnat
+ * Port number access type. Determines the way local_port is interpreted:
+ * 0 - Local port number.
+ * 1 - IB / label port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pfcc, pnat, 0x00, 14, 2);
+
+/* reg_pfcc_shl_cap
+ * Send to higher layers capabilities:
+ * 0 - No capability of sending Pause and PFC frames to higher layers.
+ * 1 - Device has capability of sending Pause and PFC frames to higher
+ * layers.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, shl_cap, 0x00, 1, 1);
+
+/* reg_pfcc_shl_opr
+ * Send to higher layers operation:
+ * 0 - Pause and PFC frames are handled by the port (default).
+ * 1 - Pause and PFC frames are handled by the port and also sent to
+ * higher layers. Only valid if shl_cap = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, shl_opr, 0x00, 0, 1);
+
+/* reg_pfcc_ppan
+ * Pause policy auto negotiation.
+ * 0 - Disabled. Generate / ignore Pause frames based on pptx / pprtx.
+ * 1 - Enabled. When auto-negotiation is performed, set the Pause policy
+ * based on the auto-negotiation resolution.
+ * Access: RW
+ *
+ * Note: The auto-negotiation advertisement is set according to pptx and
+ * pprtx. When PFC is set on Tx / Rx, ppan must be set to 0.
+ */
+MLXSW_ITEM32(reg, pfcc, ppan, 0x04, 28, 4);
+
+/* reg_pfcc_prio_mask_tx
+ * Bit per priority indicating if Tx flow control policy should be
+ * updated based on bit pfctx.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pfcc, prio_mask_tx, 0x04, 16, 8);
+
+/* reg_pfcc_prio_mask_rx
+ * Bit per priority indicating if Rx flow control policy should be
+ * updated based on bit pfcrx.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pfcc, prio_mask_rx, 0x04, 0, 8);
+
+/* reg_pfcc_pptx
+ * Admin Pause policy on Tx.
+ * 0 - Never generate Pause frames (default).
+ * 1 - Generate Pause frames according to Rx buffer threshold.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pptx, 0x08, 31, 1);
+
+/* reg_pfcc_aptx
+ * Active (operational) Pause policy on Tx.
+ * 0 - Never generate Pause frames.
+ * 1 - Generate Pause frames according to Rx buffer threshold.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, aptx, 0x08, 30, 1);
+
+/* reg_pfcc_pfctx
+ * Priority based flow control policy on Tx[7:0]. Per-priority bit mask:
+ * 0 - Never generate priority Pause frames on the specified priority
+ * (default).
+ * 1 - Generate priority Pause frames according to Rx buffer threshold on
+ * the specified priority.
+ * Access: RW
+ *
+ * Note: pfctx and pptx must be mutually exclusive.
+ */
+MLXSW_ITEM32(reg, pfcc, pfctx, 0x08, 16, 8);
+
+/* reg_pfcc_pprx
+ * Admin Pause policy on Rx.
+ * 0 - Ignore received Pause frames (default).
+ * 1 - Respect received Pause frames.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pprx, 0x0C, 31, 1);
+
+/* reg_pfcc_aprx
+ * Active (operational) Pause policy on Rx.
+ * 0 - Ignore received Pause frames.
+ * 1 - Respect received Pause frames.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1);
+
+/* reg_pfcc_pfcrx
+ * Priority based flow control policy on Rx[7:0]. Per-priority bit mask:
+ * 0 - Ignore incoming priority Pause frames on the specified priority
+ * (default).
+ * 1 - Respect incoming priority Pause frames on the specified priority.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8);
+
+#define MLXSW_REG_PFCC_ALL_PRIO 0xFF
+
+static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
+{
+ mlxsw_reg_pfcc_prio_mask_tx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
+ mlxsw_reg_pfcc_prio_mask_rx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
+ mlxsw_reg_pfcc_pfctx_set(payload, pfc_en);
+ mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
+}
+
+static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pfcc, payload);
+ mlxsw_reg_pfcc_local_port_set(payload, local_port);
+}
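+
+/* Usage sketch (hypothetical caller): enable PFC on priorities 3 and 4 of
+ * a port; pfc_en is a per-priority bitmap.
+ *
+ *	char pfcc_pl[MLXSW_REG_PFCC_LEN];
+ *
+ *	mlxsw_reg_pfcc_pack(pfcc_pl, local_port);
+ *	mlxsw_reg_pfcc_prio_pack(pfcc_pl, BIT(3) | BIT(4));
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pfcc), pfcc_pl);
+ */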
+
/* PPCNT - Ports Performance Counters Register
* -------------------------------------------
* The PPCNT register retrieves per port performance counters.
@@ -2180,6 +2497,11 @@ MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+enum mlxsw_reg_ppcnt_grp {
+ MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
+};
+
/* reg_ppcnt_grp
* Performance counter group.
* Group 63 indicates all groups. Only valid on Set() operation with
@@ -2215,6 +2537,8 @@ MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
*/
MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+/* Ethernet IEEE 802.3 Counter Group */
+
/* reg_ppcnt_a_frames_transmitted_ok
* Access: RO
*/
@@ -2329,15 +2653,160 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
0x08 + 0x90, 0, 64);
-static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+/* Ethernet Per Priority Group Counters */
+
+/* reg_ppcnt_rx_octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_rx_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_tx_octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_tx_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_rx_pause
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_rx_pause_duration
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_tx_pause
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_tx_pause_duration
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_tx_pause_transition
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause_transition, 0x08 + 0x70, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_ppcnt_grp grp,
+ u8 prio_tc)
{
MLXSW_REG_ZERO(ppcnt, payload);
mlxsw_reg_ppcnt_swid_set(payload, 0);
mlxsw_reg_ppcnt_local_port_set(payload, local_port);
mlxsw_reg_ppcnt_pnat_set(payload, 0);
- mlxsw_reg_ppcnt_grp_set(payload, 0);
+ mlxsw_reg_ppcnt_grp_set(payload, grp);
mlxsw_reg_ppcnt_clr_set(payload, 0);
- mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+ mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
+}
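+
+/* Usage sketch (hypothetical caller): read the per-priority counter group
+ * for priority 3 and extract the received PFC pause frames.
+ *
+ *	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ *
+ *	mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
+ *			     MLXSW_REG_PPCNT_PRIO_CNT, 3);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(ppcnt), ppcnt_pl);
+ *	if (!err)
+ *		rx_pause = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl);
+ */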
+
+/* PPTB - Port Prio To Buffer Register
+ * -----------------------------------
+ * Configures the switch priority to buffer table.
+ */
+#define MLXSW_REG_PPTB_ID 0x500B
+#define MLXSW_REG_PPTB_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pptb = {
+ .id = MLXSW_REG_PPTB_ID,
+ .len = MLXSW_REG_PPTB_LEN,
+};
+
+enum {
+ MLXSW_REG_PPTB_MM_UM,
+ MLXSW_REG_PPTB_MM_UNICAST,
+ MLXSW_REG_PPTB_MM_MULTICAST,
+};
+
+/* reg_pptb_mm
+ * Mapping mode.
+ * 0 - Map both unicast and multicast packets to the same buffer.
+ * 1 - Map only unicast packets.
+ * 2 - Map only multicast packets.
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports the first option.
+ */
+MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
+
+/* reg_pptb_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8);
+
+/* reg_pptb_um
+ * Enables the update of the untagged_buf field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, um, 0x00, 8, 1);
+
+/* reg_pptb_pm
+ * Enables the update of the prio_to_buff field.
+ * Bit <i> is a flag for updating the mapping for switch priority <i>.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, pm, 0x00, 0, 8);
+
+/* reg_pptb_prio_to_buff
+ * Mapping of switch priority <i> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff, 0x04, 0x04, 4);
+
+/* reg_pptb_pm_msb
+ * Enables the update of the prio_to_buff field.
+ * Bit <i> is a flag for updating the mapping for switch priority <i+8>.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
+
+/* reg_pptb_untagged_buff
+ * Mapping of untagged frames to one of the allocated receive port buffers.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field must be mapped to buffer 8. Reserved for
+ * Spectrum, as it maps untagged packets based on the default switch priority.
+ */
+MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
+
+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
+#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
+
+static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pptb, payload);
+ mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
+ mlxsw_reg_pptb_local_port_set(payload, local_port);
+ mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+ mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+ u8 buff)
+{
+ mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+ mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
}
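+
+/* Usage sketch (hypothetical caller): map all eight switch priorities of a
+ * port to receive buffer 0.
+ *
+ *	char pptb_pl[MLXSW_REG_PPTB_LEN];
+ *	int i;
+ *
+ *	mlxsw_reg_pptb_pack(pptb_pl, local_port);
+ *	for (i = 0; i < 8; i++)
+ *		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pptb), pptb_pl);
+ */
+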
/* PBMC - Port Buffer Management Control Register
@@ -2346,7 +2815,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
* allocation for different Prios, and the Pause threshold management.
*/
#define MLXSW_REG_PBMC_ID 0x500C
-#define MLXSW_REG_PBMC_LEN 0x68
+#define MLXSW_REG_PBMC_LEN 0x6C
static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
.id = MLXSW_REG_PBMC_ID,
@@ -2374,6 +2843,8 @@ MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
*/
MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
+#define MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX 11
+
/* reg_pbmc_buf_lossy
* The field indicates if the buffer is lossy.
* 0 - Lossless
@@ -2398,6 +2869,30 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
*/
MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
+/* reg_pbmc_buf_xoff_threshold
+ * Once the amount of data in the buffer goes above this value, device
+ * starts sending PFC frames for all priorities associated with the
+ * buffer. Units are represented in cells. Reserved in case of lossy
+ * buffer.
+ * Access: RW
+ *
+ * Note: In Spectrum, reserved for buffer[9].
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
+ 0x08, 0x04, false);
+
+/* reg_pbmc_buf_xon_threshold
+ * When the amount of data in the buffer goes below this value, device
+ * stops sending PFC frames for the priorities associated with the
+ * buffer. Units are represented in cells. Reserved in case of lossy
+ * buffer.
+ * Access: RW
+ *
+ * Note: In Spectrum, reserved for buffer[9].
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
+ 0x08, 0x04, false);
+
static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
u16 xoff_timer_value, u16 xoff_refresh)
{
@@ -2416,6 +2911,17 @@ static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
}
+static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload,
+ int buf_index, u16 size,
+ u16 threshold)
+{
+ mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+ mlxsw_reg_pbmc_buf_xoff_threshold_set(payload, buf_index, threshold);
+ mlxsw_reg_pbmc_buf_xon_threshold_set(payload, buf_index, threshold);
+}
+
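+/* Usage sketch (hypothetical caller): PBMC carries the whole per-port
+ * buffer table, so the usual flow is query, modify, write back.
+ *
+ *	char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ *
+ *	mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0, 0);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pbmc), pbmc_pl);
+ *	if (err)
+ *		return err;
+ *	mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, 0, size, threshold);
+ *	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pbmc), pbmc_pl);
+ */
+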
/* PSPA - Port Switch Partition Allocation
* ---------------------------------------
* Controls the association of a port with a switch partition and enables
@@ -2985,9 +3491,10 @@ static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
.len = MLXSW_REG_SBPR_LEN,
};
-enum mlxsw_reg_sbpr_dir {
- MLXSW_REG_SBPR_DIR_INGRESS,
- MLXSW_REG_SBPR_DIR_EGRESS,
+/* Shared direction enum for SBPR, SBCM, SBPM. */
+enum mlxsw_reg_sbxx_dir {
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ MLXSW_REG_SBXX_DIR_EGRESS,
};
/* reg_sbpr_dir
@@ -3020,7 +3527,7 @@ enum mlxsw_reg_sbpr_mode {
MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
- enum mlxsw_reg_sbpr_dir dir,
+ enum mlxsw_reg_sbxx_dir dir,
enum mlxsw_reg_sbpr_mode mode, u32 size)
{
MLXSW_REG_ZERO(sbpr, payload);
@@ -3062,11 +3569,6 @@ MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
-enum mlxsw_reg_sbcm_dir {
- MLXSW_REG_SBCM_DIR_INGRESS,
- MLXSW_REG_SBCM_DIR_EGRESS,
-};
-
/* reg_sbcm_dir
* Direction.
* Access: Index
@@ -3079,6 +3581,10 @@ MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
*/
MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
+/* Shared max_buff limits for the dynamic threshold, used by SBCM, SBPM */
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14
+
/* reg_sbcm_max_buff
* When the pool associated to the port-pg/tclass is configured to
* static, Maximum buffer size for the limiter configured in cells.
@@ -3099,7 +3605,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
- enum mlxsw_reg_sbcm_dir dir,
+ enum mlxsw_reg_sbxx_dir dir,
u32 min_buff, u32 max_buff, u8 pool)
{
MLXSW_REG_ZERO(sbcm, payload);
@@ -3111,8 +3617,8 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
mlxsw_reg_sbcm_pool_set(payload, pool);
}
-/* SBPM - Shared Buffer Class Management Register
- * ----------------------------------------------
+/* SBPM - Shared Buffer Port Management Register
+ * ---------------------------------------------
* The SBPM register configures and retrieves the shared buffer allocation
* and configuration according to Port-Pool, including the definition
* of the associated quota.
@@ -3139,17 +3645,33 @@ MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
-enum mlxsw_reg_sbpm_dir {
- MLXSW_REG_SBPM_DIR_INGRESS,
- MLXSW_REG_SBPM_DIR_EGRESS,
-};
-
/* reg_sbpm_dir
* Direction.
* Access: Index
*/
MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
+/* reg_sbpm_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24);
+
+/* reg_sbpm_clr
+ * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
+ * field is cleared (and a new max value is tracked from the time the clear
+ * was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1);
+
+/* reg_sbpm_max_buff_occupancy
+ * Maximum value of buffer occupancy in cells monitored. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24);
+
/* reg_sbpm_min_buff
* Minimum buffer size for the limiter, in cells.
* Access: RW
@@ -3170,17 +3692,25 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
- enum mlxsw_reg_sbpm_dir dir,
+ enum mlxsw_reg_sbxx_dir dir, bool clr,
u32 min_buff, u32 max_buff)
{
MLXSW_REG_ZERO(sbpm, payload);
mlxsw_reg_sbpm_local_port_set(payload, local_port);
mlxsw_reg_sbpm_pool_set(payload, pool);
mlxsw_reg_sbpm_dir_set(payload, dir);
+ mlxsw_reg_sbpm_clr_set(payload, clr);
mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
}
+static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy,
+ u32 *p_max_buff_occupancy)
+{
+ *p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload);
+ *p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload);
+}
+
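+/* Usage sketch (hypothetical caller): read current and maximum ingress
+ * occupancy of pool 0 on a port, clearing the tracked maximum on the way.
+ *
+ *	char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ *	u32 cur, max;
+ *
+ *	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, 0,
+ *			    MLXSW_REG_SBXX_DIR_INGRESS, true, 0, 0);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(sbpm), sbpm_pl);
+ *	if (!err)
+ *		mlxsw_reg_sbpm_unpack(sbpm_pl, &cur, &max);
+ */
+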
/* SBMM - Shared Buffer Multicast Management Register
* --------------------------------------------------
* The SBMM register configures and retrieves the shared buffer allocation
@@ -3236,6 +3766,104 @@ static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
mlxsw_reg_sbmm_pool_set(payload, pool);
}
+/* SBSR - Shared Buffer Status Register
+ * ------------------------------------
+ * The SBSR register retrieves the shared buffer occupancy according to
+ * Port-Pool. Note that this register enables reading a large amount of data.
+ * It is the user's responsibility to limit the amount of data to ensure the
+ * response can match the maximum transfer unit. In case the response exceeds
+ * the maximum transfer unit, it will be truncated with no special notice.
+ */
+#define MLXSW_REG_SBSR_ID 0xB005
+#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */
+#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_SBSR_REC_MAX_COUNT 120
+#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN + \
+ MLXSW_REG_SBSR_REC_LEN * \
+ MLXSW_REG_SBSR_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sbsr = {
+ .id = MLXSW_REG_SBSR_ID,
+ .len = MLXSW_REG_SBSR_LEN,
+};
+
+/* reg_sbsr_clr
+ * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
+ * field is cleared (and a new max value is tracked from the time the clear
+ * was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
+
+/* reg_sbsr_ingress_port_mask
+ * Bit vector for all ingress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1);
+
+/* reg_sbsr_pg_buff_mask
+ * Bit vector for all switch priority groups.
+ * Indicates which of the priorities (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other priority
+ * does not change.
+ * Range is 0..cap_max_pg_buffers - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1);
+
+/* reg_sbsr_egress_port_mask
+ * Bit vector for all egress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1);
+
+/* reg_sbsr_tclass_mask
+ * Bit vector for all traffic classes.
+ * Indicates which of the traffic classes (for which the relevant bit is
+ * set) are affected by the set operation. Configuration of any other
+ * traffic class does not change.
+ * Range is 0..cap_max_tclass - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1);
+
+static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr)
+{
+ MLXSW_REG_ZERO(sbsr, payload);
+ mlxsw_reg_sbsr_clr_set(payload, clr);
+}
+
+/* reg_sbsr_rec_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+ 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false);
+
+/* reg_sbsr_rec_max_buff_occupancy
+ * Maximum value of buffer occupancy in cells monitored. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+ 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
+ u32 *p_buff_occupancy,
+ u32 *p_max_buff_occupancy)
+{
+ *p_buff_occupancy =
+ mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index);
+ *p_max_buff_occupancy =
+ mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
+}
+
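+/* Usage sketch (hypothetical caller): SBSR payloads are large, so allocate
+ * off the stack; snapshot one ingress port and priority group, then unpack
+ * the first record.
+ *
+ *	char *sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+ *	u32 cur, max;
+ *
+ *	mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ *	mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+ *	mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, 0, 1);
+ *	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl);
+ *	if (!err)
+ *		mlxsw_reg_sbsr_rec_unpack(sbsr_pl, 0, &cur, &max);
+ *	kfree(sbsr_pl);
+ */
+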
static inline const char *mlxsw_reg_id_str(u16 reg_id)
{
switch (reg_id) {
@@ -3283,6 +3911,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SFMR";
case MLXSW_REG_SPVMLR_ID:
return "SPVMLR";
+ case MLXSW_REG_QTCT_ID:
+ return "QTCT";
+ case MLXSW_REG_QEEC_ID:
+ return "QEEC";
case MLXSW_REG_PMLP_ID:
return "PMLP";
case MLXSW_REG_PMTU_ID:
@@ -3293,8 +3925,12 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "PPAD";
case MLXSW_REG_PAOS_ID:
return "PAOS";
+ case MLXSW_REG_PFCC_ID:
+ return "PFCC";
case MLXSW_REG_PPCNT_ID:
return "PPCNT";
+ case MLXSW_REG_PPTB_ID:
+ return "PPTB";
case MLXSW_REG_PBMC_ID:
return "PBMC";
case MLXSW_REG_PSPA_ID:
@@ -3323,6 +3959,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SBPM";
case MLXSW_REG_SBMM_ID:
return "SBMM";
+ case MLXSW_REG_SBSR_ID:
+ return "SBSR";
default:
return "*UNKNOWN*";
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 668b2f465..374080027 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -49,7 +49,7 @@
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
-#include <net/devlink.h>
+#include <linux/dcbnl.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -171,23 +171,6 @@ static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
- bool *p_is_up)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char paos_pl[MLXSW_REG_PAOS_LEN];
- u8 oper_status;
- int err;
-
- mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
- if (err)
- return err;
- oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
- *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
- return 0;
-}
-
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned char *addr)
{
@@ -247,15 +230,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 swid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+ swid);
+}
+
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
@@ -307,7 +298,7 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 *p_module,
- u8 *p_width)
+ u8 *p_width, u8 *p_lane)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
@@ -318,6 +309,7 @@ static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
return err;
*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
return 0;
}
@@ -379,7 +371,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
return NETDEV_TX_BUSY;
if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
@@ -399,11 +391,15 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
}
mlxsw_sp_txhdr_construct(skb, &tx_info);
- len = skb->len;
+ /* The TX header is consumed by the HW on the way to the wire, so we
+ * shouldn't count its bytes as being sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
+
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
@@ -438,16 +434,89 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
+ bool pause_en, bool pfc_en, u16 delay)
+{
+ u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
+
+ delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
+ MLXSW_SP_PAUSE_DELAY;
+
+ if (pause_en || pfc_en)
+ mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
+ pg_size + delay, pg_size);
+ else
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
+}
+
+int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
+ u8 *prio_tc, bool pause_en,
+ struct ieee_pfc *my_pfc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
+ u16 delay = !!my_pfc ? my_pfc->delay : 0;
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ int i, j, err;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ bool configure = false;
+ bool pfc = false;
+
+ for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
+ if (prio_tc[j] == i) {
+ pfc = pfc_en & BIT(j);
+ configure = true;
+ break;
+ }
+ }
+
+ if (!configure)
+ continue;
+ mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+}
+
+static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ int mtu, bool pause_en)
+{
+ u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
+ bool dcb_en = !!mlxsw_sp_port->dcb.ets;
+ struct ieee_pfc *my_pfc;
+ u8 *prio_tc;
+
+ prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
+ my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
+
+ return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
+ pause_en, my_pfc);
+}
+
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
int err;
- err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+ err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
if (err)
return err;
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+ if (err)
+ goto err_port_mtu_set;
dev->mtu = mtu;
return 0;
+
+err_port_mtu_set:
+ mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ return err;
}
static struct rtnl_link_stats64 *
@@ -861,6 +930,27 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
}
+static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
+ size_t len)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ u8 module = mlxsw_sp_port->mapping.module;
+ u8 width = mlxsw_sp_port->mapping.width;
+ u8 lane = mlxsw_sp_port->mapping.lane;
+ int err;
+
+ if (!mlxsw_sp_port->split)
+ err = snprintf(name, len, "p%d", module + 1);
+ else
+ err = snprintf(name, len, "p%ds%d", module + 1,
+ lane / width);
+
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
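The name format above yields "p<module+1>" for a regular port and "p<module+1>s<lane/width>" for a split one. A minimal user-space sketch of the same formatting, assuming a split port on module 0 whose second half sits on lane 2 with width 2 (all values here are illustrative, not taken from real hardware):

	#include <stdio.h>

	int main(void)
	{
		char name[16];
		unsigned int module = 0, width = 2, lane = 2;
		int split = 1;

		if (!split)
			snprintf(name, sizeof(name), "p%u", module + 1);
		else
			snprintf(name, sizeof(name), "p%us%u", module + 1,
				 lane / width);
		printf("%s\n", name);	/* prints "p1s1" */
		return 0;
	}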
@@ -877,6 +967,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -897,6 +988,68 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
+static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ pause->rx_pause = mlxsw_sp_port->link.rx_pause;
+ pause->tx_pause = mlxsw_sp_port->link.tx_pause;
+}
+
+static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ethtool_pauseparam *pause)
+{
+ char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+ mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
+ mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+ pfcc_pl);
+}
+
+static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = pause->tx_pause || pause->rx_pause;
+ int err;
+
+ if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
+ netdev_err(dev, "PFC already enabled on port\n");
+ return -EINVAL;
+ }
+
+ if (pause->autoneg) {
+ netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
+ if (err) {
+ netdev_err(dev, "Failed to set PAUSE parameters\n");
+ goto err_port_pause_configure;
+ }
+
+ mlxsw_sp_port->link.rx_pause = pause->rx_pause;
+ mlxsw_sp_port->link.tx_pause = pause->tx_pause;
+
+ return 0;
+
+err_port_pause_configure:
+ pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ return err;
+}
+
struct mlxsw_sp_port_hw_stats {
char str[ETH_GSTRING_LEN];
u64 (*getter)(char *payload);
@@ -1032,7 +1185,8 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev,
int i;
int err;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
+ MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
@@ -1263,7 +1417,8 @@ static int mlxsw_sp_port_get_settings(struct net_device *dev,
cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause |
+ SUPPORTED_Autoneg;
cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
eth_proto_oper, cmd);
@@ -1322,7 +1477,6 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
u32 eth_proto_new;
u32 eth_proto_cap;
u32 eth_proto_admin;
- bool is_up;
int err;
speed = ethtool_cmd_speed(cmd);
@@ -1354,12 +1508,7 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
return err;
}
- err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
- if (err) {
- netdev_err(dev, "Failed to get oper status");
- return err;
- }
- if (!is_up)
+ if (!netif_running(dev))
return 0;
err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
@@ -1380,6 +1529,8 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_pauseparam = mlxsw_sp_port_get_pauseparam,
+ .set_pauseparam = mlxsw_sp_port_set_pauseparam,
.get_strings = mlxsw_sp_port_get_strings,
.set_phys_id = mlxsw_sp_port_set_phys_id,
.get_ethtool_stats = mlxsw_sp_port_get_stats,
@@ -1402,12 +1553,112 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width)
+int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
+ bool dwrr, u8 dwrr_weight)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_de_set(qeec_pl, true);
+ mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
+ mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 maxrate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_mase_set(qeec_pl, true);
+ mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 switch_prio, u8 tclass)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qtct_pl[MLXSW_REG_QTCT_LEN];
+
+ mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
+ tclass);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
+}
+
+static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err, i;
+
+ /* Set up the element hierarchy, so that each TC is linked to
+ * one subgroup, and all subgroups are members of the same group.
+ */
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
+ 0);
+ if (err)
+ return err;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ 0, false, 0);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC, i, i,
+ false, 0);
+ if (err)
+ return err;
+ }
+
+ /* Make sure the max shaper is disabled in all hierarchies that
+ * support it.
+ */
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ i, 0,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i, i,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
+ }
+
+ /* Map all priorities to traffic class 0. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
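The QEEC writes above build the scheduling tree bottom-up: each of the eight traffic classes feeds the subgroup of the same index, and every subgroup feeds group 0. A standalone sketch that only enumerates the (level, index, next_index) tuples the init routine issues, with level names standing in for the MLXSW_REG_QEEC_HIERARCY_* constants:

	#include <stdio.h>

	#define NUM_TCS 8	/* IEEE_8021QAZ_MAX_TCS */

	int main(void)
	{
		int i;

		printf("group    index=0 next=0\n");
		for (i = 0; i < NUM_TCS; i++)
			printf("subgroup index=%d next=0\n", i);
		for (i = 0; i < NUM_TCS; i++)
			printf("tc       index=%d next=%d\n", i, i);
		return 0;
	}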
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_port *mlxsw_sp_port;
- struct devlink_port *devlink_port;
struct net_device *dev;
size_t bytes;
int err;
@@ -1420,6 +1671,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->split = split;
+ mlxsw_sp_port->mapping.module = module;
+ mlxsw_sp_port->mapping.width = width;
+ mlxsw_sp_port->mapping.lane = lane;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -1460,16 +1714,6 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
*/
dev->hard_header_len += MLXSW_TXHDR_LEN;
- devlink_port = &mlxsw_sp_port->devlink_port;
- if (mlxsw_sp_port->split)
- devlink_port_split_set(devlink_port, module);
- err = devlink_port_register(devlink, devlink_port, local_port);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
- mlxsw_sp_port->local_port);
- goto err_devlink_port_register;
- }
-
err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1509,6 +1753,21 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_buffers_init;
}
+ err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_ets_init;
+ }
+
+ /* ETS and buffers must be initialized before DCB. */
+ err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_dcb_init;
+ }
+
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
err = register_netdev(dev);
if (err) {
@@ -1517,7 +1776,14 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_register_netdev;
}
- devlink_port_type_eth_set(devlink_port, dev);
+ err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
+ mlxsw_sp_port->local_port, dev,
+ mlxsw_sp_port->split, module);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
+ mlxsw_sp_port->local_port);
+ goto err_core_port_init;
+ }
err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
if (err)
@@ -1527,16 +1793,18 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return 0;
err_port_vlan_init:
+ mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
+err_core_port_init:
unregister_netdev(dev);
err_register_netdev:
+err_port_dcb_init:
+err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
err_port_swid_set:
err_port_system_port_mapping_set:
- devlink_port_unregister(&mlxsw_sp_port->devlink_port);
-err_devlink_port_register:
err_dev_addr_init:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
@@ -1548,28 +1816,6 @@ err_port_active_vlans_alloc:
return err;
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
-{
- int err;
-
- err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
- lane);
- if (err)
- return err;
-
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
- width);
- if (err)
- goto err_port_create;
-
- return 0;
-
-err_port_create:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
- return err;
-}
-
static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
@@ -1590,15 +1836,13 @@ static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- struct devlink_port *devlink_port;
if (!mlxsw_sp_port)
return;
mlxsw_sp->ports[local_port] = NULL;
- devlink_port = &mlxsw_sp_port->devlink_port;
- devlink_port_type_clear(devlink_port);
+ mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
- devlink_port_unregister(devlink_port);
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_vports_fini(mlxsw_sp_port);
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
@@ -1620,8 +1864,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
+ u8 module, width, lane;
size_t alloc_size;
- u8 module, width;
int i;
int err;
@@ -1632,13 +1876,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
- &width);
+ &width, &lane);
if (err)
goto err_port_module_info_get;
if (!width)
continue;
mlxsw_sp->port_to_module[i] = module;
- err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+ lane);
if (err)
goto err_port_create;
}
@@ -1659,11 +1904,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
return local_port - offset;
}
-static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+ u8 module, unsigned int count)
{
- struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_port *mlxsw_sp_port;
u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ int err, i;
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+ width, i * width);
+ if (err)
+ goto err_port_module_map;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+ if (err)
+ goto err_port_swid_set;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+ module, width, i * width);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ i = count;
+err_port_swid_set:
+ for (i--; i >= 0; i--)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ i = count;
+err_port_module_map:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+ return err;
+}
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+ u8 base_port, unsigned int count)
+{
+ u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+ int i;
+
+ /* A split by four means we need to re-create two ports; a split
+ * by two means only one.
+ */
+ count = count / 2;
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+ 0);
+ }
+
+ for (i = 0; i < count; i++)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+ width, 0);
+ }
+}
+
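Unsplit re-creation only touches every other local port: a port that was split into four leaves count/2 = 2 full-width ports to restore, at base_port and base_port + 2, while a split into two leaves just one. A hedged sketch of that index arithmetic (the base_port value is made up for the demonstration):

	#include <stdio.h>

	int main(void)
	{
		unsigned int base_port = 4;	/* hypothetical cluster base */
		unsigned int count = 4;		/* ports the split produced */
		unsigned int i;

		/* prints ports 4 and 6 */
		for (i = 0; i < count / 2; i++)
			printf("re-create full-width port %u\n",
			       base_port + i * 2);
		return 0;
	}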
+static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
+ unsigned int count)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_port *mlxsw_sp_port;
u8 module, cur_width, base_port;
int i;
int err;
@@ -1675,18 +1994,14 @@ static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
return -EINVAL;
}
+ module = mlxsw_sp_port->mapping.module;
+ cur_width = mlxsw_sp_port->mapping.width;
+
if (count != 2 && count != 4) {
netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
-
if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
return -EINVAL;
@@ -1711,36 +2026,26 @@ static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
- module, width, i * width);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
- goto err_port_create;
- }
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+ goto err_port_split_create;
}
return 0;
-err_port_create:
- for (i--; i >= 0; i--)
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
- }
+err_port_split_create:
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return err;
}
-static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
+static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
- struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 module, cur_width, base_port;
+ u8 cur_width, base_port;
unsigned int count;
int i;
- int err;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
@@ -1754,12 +2059,7 @@ static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
+ cur_width = mlxsw_sp_port->mapping.width;
count = cur_width == 1 ? 4 : 2;
base_port = mlxsw_sp_cluster_base_port_get(local_port);
@@ -1771,14 +2071,7 @@ static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH,
- 0);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
- }
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return 0;
}
@@ -2080,10 +2373,10 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
-static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
+static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
- struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
int err;
mlxsw_sp->core = mlxsw_core;
@@ -2144,6 +2437,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
err_switchdev_init:
err_lag_init:
+ mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
mlxsw_sp_traps_fini(mlxsw_sp);
@@ -2154,11 +2448,12 @@ err_event_register:
return err;
}
-static void mlxsw_sp_fini(void *priv)
+static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
- struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
mlxsw_sp_ports_remove(mlxsw_sp);
@@ -2201,16 +2496,26 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
};
static struct mlxsw_driver mlxsw_sp_driver = {
- .kind = MLXSW_DEVICE_KIND_SPECTRUM,
- .owner = THIS_MODULE,
- .priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp_init,
- .fini = mlxsw_sp_fini,
- .port_split = mlxsw_sp_port_split,
- .port_unsplit = mlxsw_sp_port_unsplit,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
- .txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sp_config_profile,
+ .kind = MLXSW_DEVICE_KIND_SPECTRUM,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp_init,
+ .fini = mlxsw_sp_fini,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp_config_profile,
};
static int
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4b8abaf06..13b30eaa1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -42,15 +42,15 @@
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
+#include <linux/dcbnl.h>
#include <net/switchdev.h>
-#include <net/devlink.h>
#include "port.h"
#include "core.h"
#define MLXSW_SP_VFID_BASE VLAN_N_VID
#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */
-#define MLXSW_SP_VFID_BR_MAX 8192 /* Bridged VLAN interfaces */
+#define MLXSW_SP_VFID_BR_MAX 6144 /* Bridged VLAN interfaces */
#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX)
#define MLXSW_SP_LAG_MAX 64
@@ -62,6 +62,24 @@
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
+#define MLXSW_SP_BYTES_PER_CELL 96
+
+#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
+#define MLXSW_SP_CELLS_TO_BYTES(c) (c * MLXSW_SP_BYTES_PER_CELL)
+
+/* Maximum delay buffer needed in case of PAUSE frames, in cells.
+ * Assumes 100m cable and maximum MTU.
+ */
+#define MLXSW_SP_PAUSE_DELAY 612
+
+#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
+
+static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
+{
+ delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
+ return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
+}
+
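With 96-byte cells, MLXSW_SP_BYTES_TO_CELLS rounds up, so a 1500-byte MTU occupies 16 cells. The delay helper above converts the PFC delay from bit time to bytes, then to cells, scales it by the cell factor, and adds one MTU. A self-contained restatement of that arithmetic; the 32768-bit delay input is an example value only:

	#include <stdio.h>

	#define BYTES_PER_CELL	96
	#define CELL_FACTOR	2
	#define BITS_PER_BYTE	8
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define BYTES_TO_CELLS(b)	DIV_ROUND_UP(b, BYTES_PER_CELL)

	static unsigned int pfc_delay_get(int mtu, unsigned int delay)
	{
		/* bit times -> bytes -> cells, then factor plus one MTU */
		delay = BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
		return CELL_FACTOR * delay + BYTES_TO_CELLS(mtu);
	}

	int main(void)
	{
		printf("%u cells\n", pfc_delay_get(1500, 32768)); /* 102 */
		return 0;
	}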
struct mlxsw_sp_port;
struct mlxsw_sp_upper {
@@ -100,6 +118,40 @@ static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
return fid >= MLXSW_SP_VFID_BASE;
}
+struct mlxsw_sp_sb_pr {
+ enum mlxsw_reg_sbpr_mode mode;
+ u32 size;
+};
+
+struct mlxsw_cp_sb_occ {
+ u32 cur;
+ u32 max;
+};
+
+struct mlxsw_sp_sb_cm {
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+ struct mlxsw_cp_sb_occ occ;
+};
+
+struct mlxsw_sp_sb_pm {
+ u32 min_buff;
+ u32 max_buff;
+ struct mlxsw_cp_sb_occ occ;
+};
+
+#define MLXSW_SP_SB_POOL_COUNT 4
+#define MLXSW_SP_SB_TC_COUNT 8
+
+struct mlxsw_sp_sb {
+ struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
+ struct {
+ struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+ struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+ } ports[MLXSW_PORT_MAX_PORTS];
+};
+
struct mlxsw_sp {
struct {
struct list_head list;
@@ -130,6 +182,7 @@ struct mlxsw_sp {
struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
+ struct mlxsw_sp_sb sb;
};
static inline struct mlxsw_sp_upper *
@@ -148,6 +201,7 @@ struct mlxsw_sp_port_pcpu_stats {
};
struct mlxsw_sp_port {
+ struct mlxsw_core_port core_port; /* must be first */
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
@@ -166,14 +220,33 @@ struct mlxsw_sp_port {
struct mlxsw_sp_vfid *vfid;
u16 vid;
} vport;
+ struct {
+ u8 tx_pause:1,
+ rx_pause:1;
+ } link;
+ struct {
+ struct ieee_ets *ets;
+ struct ieee_maxrate *maxrate;
+ struct ieee_pfc *pfc;
+ } dcb;
+ struct {
+ u8 module;
+ u8 width;
+ u8 lane;
+ } mapping;
/* 802.1Q bridge VLANs */
unsigned long *active_vlans;
unsigned long *untagged_vlans;
/* VLAN interfaces */
struct list_head vports_list;
- struct devlink_port devlink_port;
};
+static inline bool
+mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
+}
+
static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
@@ -245,7 +318,39 @@ enum mlxsw_sp_flood_table {
};
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type);
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold);
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold);
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
@@ -265,5 +370,33 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
bool set, bool only_uc);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
+ bool dwrr, u8 dwrr_weight);
+int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 switch_prio, u8 tclass);
+int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
+ u8 *prio_tc, bool pause_en,
+ struct ieee_pfc *my_pfc);
+int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 maxrate);
+
+#ifdef CONFIG_MLXSW_SPECTRUM_DCB
+
+int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+
+#else
+
+static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return 0;
+}
+
+static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{}
+
+#endif
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index d59195e3f..074cdda7b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -34,36 +34,140 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/dcbnl.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"
-struct mlxsw_sp_pb {
- u8 index;
- u16 size;
-};
+static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
+ u8 pool,
+ enum mlxsw_reg_sbxx_dir dir)
+{
+ return &mlxsw_sp->sb.prs[dir][pool];
+}
-#define MLXSW_SP_PB(_index, _size) \
- { \
- .index = _index, \
- .size = _size, \
+static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 pg_buff,
+ enum mlxsw_reg_sbxx_dir dir)
+{
+ return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
+}
+
+static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir)
+{
+ return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
+}
+
+static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir,
+ enum mlxsw_reg_sbpr_mode mode, u32 size)
+{
+ char sbpr_pl[MLXSW_REG_SBPR_LEN];
+ struct mlxsw_sp_sb_pr *pr;
+ int err;
+
+ mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+ if (err)
+ return err;
+
+ pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+ pr->mode = mode;
+ pr->size = size;
+ return 0;
+}
+
+static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
+ u32 min_buff, u32 max_buff, u8 pool)
+{
+ char sbcm_pl[MLXSW_REG_SBCM_LEN];
+ int err;
+
+ mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
+ min_buff, max_buff, pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+ if (err)
+ return err;
+ if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
+ struct mlxsw_sp_sb_cm *cm;
+
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
+ cm->min_buff = min_buff;
+ cm->max_buff = max_buff;
+ cm->pool = pool;
}
+ return 0;
+}
+
+static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pool, enum mlxsw_reg_sbxx_dir dir,
+ u32 min_buff, u32 max_buff)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ struct mlxsw_sp_sb_pm *pm;
+ int err;
+
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
+ min_buff, max_buff);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
+ if (err)
+ return err;
+
+ pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+ pm->min_buff = min_buff;
+ pm->max_buff = max_buff;
+ return 0;
+}
+
+static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pool, enum mlxsw_reg_sbxx_dir dir,
+ struct list_head *bulk_list)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
+ return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+ bulk_list, NULL, 0);
+}
+
+static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ char *sbpm_pl, size_t sbpm_pl_len,
+ unsigned long cb_priv)
+{
+ struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
+
+ mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
+}
+
+static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pool, enum mlxsw_reg_sbxx_dir dir,
+ struct list_head *bulk_list)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ struct mlxsw_sp_sb_pm *pm;
+
+ pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
+ return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+ bulk_list,
+ mlxsw_sp_sb_pm_occ_query_cb,
+ (unsigned long) pm);
+}
-static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
- MLXSW_SP_PB(0, 208),
- MLXSW_SP_PB(1, 208),
- MLXSW_SP_PB(2, 208),
- MLXSW_SP_PB(3, 208),
- MLXSW_SP_PB(4, 208),
- MLXSW_SP_PB(5, 208),
- MLXSW_SP_PB(6, 208),
- MLXSW_SP_PB(7, 208),
- MLXSW_SP_PB(9, 208),
+static const u16 mlxsw_sp_pbs[] = {
+ [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
+ [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
};
#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+#define MLXSW_SP_PB_UNUSED 8
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -73,194 +177,206 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
0xffff, 0xffff / 2);
for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
- const struct mlxsw_sp_pb *pb;
-
- pb = &mlxsw_sp_pbs[i];
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+ if (i == MLXSW_SP_PB_UNUSED)
+ continue;
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
}
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
+ MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
MLXSW_REG(pbmc), pbmc_pl);
}
-#define MLXSW_SP_SB_BYTES_PER_CELL 96
+static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ char pptb_pl[MLXSW_REG_PPTB_LEN];
+ int i;
-struct mlxsw_sp_sb_pool {
- u8 pool;
- enum mlxsw_reg_sbpr_dir dir;
- enum mlxsw_reg_sbpr_mode mode;
- u32 size;
-};
+ mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
+ pptb_pl);
+}
+
+static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
+}
-#define MLXSW_SP_SB_POOL_INGRESS_SIZE \
- ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \
- MLXSW_SP_SB_BYTES_PER_CELL)
-#define MLXSW_SP_SB_POOL_EGRESS_SIZE \
- ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \
- MLXSW_SP_SB_BYTES_PER_CELL)
-
-#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \
- { \
- .pool = _pool, \
- .dir = _dir, \
- .mode = _mode, \
- .size = _size, \
+#define MLXSW_SP_SB_PR_INGRESS_SIZE \
+ (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
+#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
+#define MLXSW_SP_SB_PR_EGRESS_SIZE \
+ (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))
+
+#define MLXSW_SP_SB_PR(_mode, _size) \
+ { \
+ .mode = _mode, \
+ .size = _size, \
}
-#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \
- MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \
- MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
-
-#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \
- MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \
- MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
-
-static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
- MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE),
- MLXSW_SP_SB_POOL_INGRESS(1, 0),
- MLXSW_SP_SB_POOL_INGRESS(2, 0),
- MLXSW_SP_SB_POOL_INGRESS(3, 0),
- MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE),
- MLXSW_SP_SB_POOL_EGRESS(1, 0),
- MLXSW_SP_SB_POOL_EGRESS(2, 0),
- MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
};
-#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
+#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
+
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+};
-static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp)
+#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
+
+static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_sbxx_dir dir,
+ const struct mlxsw_sp_sb_pr *prs,
+ size_t prs_len)
{
- char sbpr_pl[MLXSW_REG_SBPR_LEN];
int i;
int err;
- for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) {
- const struct mlxsw_sp_sb_pool *pool;
+ for (i = 0; i < prs_len; i++) {
+ const struct mlxsw_sp_sb_pr *pr;
- pool = &mlxsw_sp_sb_pools[i];
- mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,
- pool->mode, pool->size);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+ pr = &prs[i];
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
+ pr->mode, pr->size);
if (err)
return err;
}
return 0;
}
-struct mlxsw_sp_sb_cm {
- union {
- u8 pg;
- u8 tc;
- } u;
- enum mlxsw_reg_sbcm_dir dir;
- u32 min_buff;
- u32 max_buff;
- u8 pool;
-};
+static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
-#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \
- { \
- .u.pg = _pg_tc, \
- .dir = _dir, \
- .min_buff = _min_buff, \
- .max_buff = _max_buff, \
- .pool = _pool, \
+ err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
+ mlxsw_sp_sb_prs_ingress,
+ MLXSW_SP_SB_PRS_INGRESS_LEN);
+ if (err)
+ return err;
+ return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_sb_prs_egress,
+ MLXSW_SP_SB_PRS_EGRESS_LEN);
+}
+
+#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
}
-#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \
- MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \
- _min_buff, _max_buff, 0)
-
-#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \
- MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \
- _min_buff, _max_buff, 0)
-
-#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \
- MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3)
-
-static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
- MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8),
- MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(4, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff),
- MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(11, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(12, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(13, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(14, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(15, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff),
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
};
-#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms)
+#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(1, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
+
+#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31),
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
};
#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
-static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- const struct mlxsw_sp_sb_cm *cms,
- size_t cms_len)
+static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ enum mlxsw_reg_sbxx_dir dir,
+ const struct mlxsw_sp_sb_cm *cms,
+ size_t cms_len)
{
- char sbcm_pl[MLXSW_REG_SBCM_LEN];
int i;
int err;
for (i = 0; i < cms_len; i++) {
const struct mlxsw_sp_sb_cm *cm;
+ if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ continue; /* PG number 8 does not exist, skip it */
cm = &cms[i];
- mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir,
- cm->min_buff, cm->max_buff, cm->pool);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+ err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
+ cm->min_buff, cm->max_buff,
+ cm->pool);
if (err)
return err;
}
@@ -269,105 +385,120 @@ static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port, mlxsw_sp_sb_cms,
- MLXSW_SP_SB_CMS_LEN);
+ int err;
+
+ err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ mlxsw_sp_sb_cms_ingress,
+ MLXSW_SP_SB_CMS_INGRESS_LEN);
+ if (err)
+ return err;
+ return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_sb_cms_egress,
+ MLXSW_SP_SB_CMS_EGRESS_LEN);
}
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
- return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms,
- MLXSW_SP_CPU_PORT_SB_MCS_LEN);
+ return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_cpu_port_sb_cms,
+ MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}
-struct mlxsw_sp_sb_pm {
- u8 pool;
- enum mlxsw_reg_sbpm_dir dir;
- u32 min_buff;
- u32 max_buff;
+#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ }
+
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
-#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \
- { \
- .pool = _pool, \
- .dir = _dir, \
- .min_buff = _min_buff, \
- .max_buff = _max_buff, \
- }
+#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
-#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \
- MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \
- _min_buff, _max_buff)
-
-#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \
- MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \
- _min_buff, _max_buff)
-
-static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
- MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff),
- MLXSW_SP_SB_PM_INGRESS(1, 0, 0),
- MLXSW_SP_SB_PM_INGRESS(2, 0, 0),
- MLXSW_SP_SB_PM_INGRESS(3, 0, 0),
- MLXSW_SP_SB_PM_EGRESS(0, 0, 7),
- MLXSW_SP_SB_PM_EGRESS(1, 0, 0),
- MLXSW_SP_SB_PM_EGRESS(2, 0, 0),
- MLXSW_SP_SB_PM_EGRESS(3, 0, 0),
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
+ MLXSW_SP_SB_PM(0, 7),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
-#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
-static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ enum mlxsw_reg_sbxx_dir dir,
+ const struct mlxsw_sp_sb_pm *pms,
+ size_t pms_len)
{
- char sbpm_pl[MLXSW_REG_SBPM_LEN];
int i;
int err;
- for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+ for (i = 0; i < pms_len; i++) {
const struct mlxsw_sp_sb_pm *pm;
- pm = &mlxsw_sp_sb_pms[i];
- mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port,
- pm->pool, pm->dir,
- pm->min_buff, pm->max_buff);
- err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
- MLXSW_REG(sbpm), sbpm_pl);
+ pm = &pms[i];
+ err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
+ pm->min_buff, pm->max_buff);
if (err)
return err;
}
return 0;
}
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ mlxsw_sp_sb_pms_ingress,
+ MLXSW_SP_SB_PMS_INGRESS_LEN);
+ if (err)
+ return err;
+ return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_sb_pms_egress,
+ MLXSW_SP_SB_PMS_EGRESS_LEN);
+}
+
struct mlxsw_sp_sb_mm {
- u8 prio;
u32 min_buff;
u32 max_buff;
u8 pool;
};
-#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \
- { \
- .prio = _prio, \
- .min_buff = _min_buff, \
- .max_buff = _max_buff, \
- .pool = _pool, \
+#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
}
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
- MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
};
#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -382,7 +513,7 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_sb_mm *mc;
mc = &mlxsw_sp_sb_mms[i];
- mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+ mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
mc->max_buff, mc->pool);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
if (err)
@@ -391,26 +522,39 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)
+
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
- err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+ err = mlxsw_sp_sb_prs_init(mlxsw_sp);
if (err)
return err;
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
if (err)
return err;
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+ if (err)
+ return err;
+ return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+ MLXSW_SP_SB_SIZE,
+ MLXSW_SP_SB_POOL_COUNT,
+ MLXSW_SP_SB_POOL_COUNT,
+ MLXSW_SP_SB_TC_COUNT,
+ MLXSW_SP_SB_TC_COUNT);
+}
- return err;
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;
- err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
if (err)
return err;
err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
@@ -420,3 +564,394 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
+
+static u8 pool_get(u16 pool_index)
+{
+ return pool_index % MLXSW_SP_SB_POOL_COUNT;
+}
+
+static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
+{
+ u16 pool_index;
+
+ pool_index = pool;
+ if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
+ pool_index += MLXSW_SP_SB_POOL_COUNT;
+ return pool_index;
+}
+
+static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
+{
+ return pool_index < MLXSW_SP_SB_POOL_COUNT ?
+ MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
+}
+
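devlink is shown eight pools while the hardware has four per direction: indices 0-3 map to ingress pools 0-3 and indices 4-7 to egress pools 0-3. A standalone round-trip of that encoding; the enum mirrors mlxsw_reg_sbxx_dir but is redeclared here so the sketch compiles on its own:

	#include <stdio.h>

	#define POOL_COUNT 4	/* MLXSW_SP_SB_POOL_COUNT */

	enum dir { DIR_INGRESS, DIR_EGRESS };

	static unsigned char pool_get(unsigned short pool_index)
	{
		return pool_index % POOL_COUNT;
	}

	static enum dir dir_get(unsigned short pool_index)
	{
		return pool_index < POOL_COUNT ? DIR_INGRESS : DIR_EGRESS;
	}

	static unsigned short pool_index_get(unsigned char pool, enum dir dir)
	{
		return dir == DIR_EGRESS ? pool + POOL_COUNT : pool;
	}

	int main(void)
	{
		unsigned short idx = pool_index_get(2, DIR_EGRESS);

		/* prints "index 6 -> pool 2, egress" */
		printf("index %u -> pool %u, %s\n", idx, pool_get(idx),
		       dir_get(idx) == DIR_EGRESS ? "egress" : "ingress");
		return 0;
	}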
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+ pool_info->pool_type = dir;
+ pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
+ pool_info->threshold_type = pr->mode;
+ return 0;
+}
+
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ enum mlxsw_reg_sbpr_mode mode = threshold_type;
+ u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+
+ return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
+}
+
+#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
+
+static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+ if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
+ return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+ return MLXSW_SP_CELLS_TO_BYTES(max_buff);
+}
+
+static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir, u32 threshold,
+ u32 *p_max_buff)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+ if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
+ int val;
+
+ val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+ if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
+ val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
+ return -EINVAL;
+ *p_max_buff = val;
+ } else {
+ *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
+ }
+ return 0;
+}
+
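For dynamic pools the devlink threshold and the register max_buff differ only by the fixed -2 offset noted above (threshold 3 maps to max_buff 1, 16 to 14); static pools carry a byte size converted to cells instead. A minimal sketch of the dynamic-mode conversion with its bounds check, using 1 and 14 as the limits implied by that mapping:

	#include <stdio.h>

	#define ALPHA_OFFSET	(-2)	/* 3->1, 16->14 */
	#define DYN_MAX_BUFF_MIN 1	/* limits implied by the mapping */
	#define DYN_MAX_BUFF_MAX 14

	static int threshold_in(int threshold, int *max_buff)
	{
		int val = threshold + ALPHA_OFFSET;

		if (val < DYN_MAX_BUFF_MIN || val > DYN_MAX_BUFF_MAX)
			return -1;	/* -EINVAL in the driver */
		*max_buff = val;
		return 0;
	}

	int main(void)
	{
		int max_buff;

		if (!threshold_in(16, &max_buff))
			printf("threshold 16 -> max_buff %d\n", max_buff);
		return 0;
	}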
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+ pool, dir);
+
+ *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
+ pm->max_buff);
+ return 0;
+}
+
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ u32 max_buff;
+ int err;
+
+ err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+ threshold, &max_buff);
+ if (err)
+ return err;
+
+ return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
+ 0, max_buff);
+}
+
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = pool_type;
+ struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+ pg_buff, dir);
+
+ *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
+ cm->max_buff);
+ *p_pool_index = pool_index_get(cm->pool, pool_type);
+ return 0;
+}
+
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = pool_type;
+ u8 pool = pool_index;
+ u32 max_buff;
+ int err;
+
+ err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+ threshold, &max_buff);
+ if (err)
+ return err;
+
+ if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
+ if (pool < MLXSW_SP_SB_POOL_COUNT)
+ return -EINVAL;
+ pool -= MLXSW_SP_SB_POOL_COUNT;
+ } else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
+ return -EINVAL;
+ }
+ return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
+ 0, max_buff, pool);
+}
+
+#define MASKED_COUNT_MAX \
+ (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
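Each SBSR record reports the occupancy of one (port, buffer/TC) pair per direction, and the callback below consumes MLXSW_SP_SB_TC_COUNT ingress plus MLXSW_SP_SB_TC_COUNT egress records per polled port, so a single query can cover at most MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2) ports. With 8 TCs and, say, a 128-record payload, that would be 8 ports per batch; this is what drives the next_batch loops in the snapshot and clear routines further down.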
+
+struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
+ u8 masked_count;
+ u8 local_port_1;
+};
+
+static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ char *sbsr_pl, size_t sbsr_pl_len,
+ unsigned long cb_priv)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ u8 masked_count;
+ u8 local_port;
+ int rec_index = 0;
+ struct mlxsw_sp_sb_cm *cm;
+ int i;
+
+ memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
+
+ masked_count = 0;
+ for (local_port = cb_ctx.local_port_1;
+ local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_INGRESS);
+ mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+ &cm->occ.cur, &cm->occ.max);
+ }
+ if (++masked_count == cb_ctx.masked_count)
+ break;
+ }
+ masked_count = 0;
+ for (local_port = cb_ctx.local_port_1;
+ local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_EGRESS);
+ mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+ &cm->occ.cur, &cm->occ.max);
+ }
+ if (++masked_count == cb_ctx.masked_count)
+ break;
+ }
+}
+
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ unsigned long cb_priv;
+ LIST_HEAD(bulk_list);
+ char *sbsr_pl;
+ u8 masked_count;
+ u8 local_port_1;
+ u8 local_port = 0;
+ int i;
+ int err;
+ int err2;
+
+ sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+ if (!sbsr_pl)
+ return -ENOMEM;
+
+next_batch:
+ local_port++;
+ local_port_1 = local_port;
+ masked_count = 0;
+ mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+ mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+ }
+ for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+ err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ }
+ if (++masked_count == MASKED_COUNT_MAX)
+ goto do_query;
+ }
+
+do_query:
+ cb_ctx.masked_count = masked_count;
+ cb_ctx.local_port_1 = local_port_1;
+ memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
+ err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+ &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
+ cb_priv);
+ if (err)
+ goto out;
+ if (local_port < MLXSW_PORT_MAX_PORTS)
+ goto next_batch;
+
+out:
+ err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+ if (!err)
+ err = err2;
+ kfree(sbsr_pl);
+ return err;
+}
+
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ LIST_HEAD(bulk_list);
+ char *sbsr_pl;
+ unsigned int masked_count;
+ u8 local_port = 0;
+ int i;
+ int err;
+ int err2;
+
+ sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+ if (!sbsr_pl)
+ return -ENOMEM;
+
+next_batch:
+ local_port++;
+ masked_count = 0;
+ mlxsw_reg_sbsr_pack(sbsr_pl, true);
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+ mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+ }
+ for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+ err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ }
+ if (++masked_count == MASKED_COUNT_MAX)
+ goto do_query;
+ }
+
+do_query:
+ err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+ &bulk_list, NULL, 0);
+ if (err)
+ goto out;
+ if (local_port < MLXSW_PORT_MAX_PORTS)
+ goto next_batch;
+
+out:
+ err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+ if (!err)
+ err = err2;
+ kfree(sbsr_pl);
+ return err;
+}
+
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+ pool, dir);
+
+ *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
+ *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
+ return 0;
+}
+
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = pool_type;
+ struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+ pg_buff, dir);
+
+ *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
+ *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
new file mode 100644
index 000000000..01cfb7512
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -0,0 +1,484 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <net/dcbnl.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+static u8 mlxsw_sp_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 mlxsw_sp_dcbnl_setdcbx(struct net_device __always_unused *dev,
+ u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(ets, mlxsw_sp_port->dcb.ets, sizeof(*ets));
+
+ return 0;
+}
+
+static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ bool has_ets_tc = false;
+ int i, tx_bw_sum = 0;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ has_ets_tc = true;
+ tx_bw_sum += ets->tc_tx_bw[i];
+ break;
+ default:
+ netdev_err(dev, "Only strict priority and ETS are supported\n");
+ return -EINVAL;
+ }
+
+ if (ets->prio_tc[i] >= IEEE_8021QAZ_MAX_TCS) {
+ netdev_err(dev, "Invalid TC\n");
+ return -EINVAL;
+ }
+ }
+
+ if (has_ets_tc && tx_bw_sum != 100) {
+ netdev_err(dev, "Total ETS bandwidth should equal 100\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
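For reference, a configuration that passes this validation might look as follows; a sketch against struct ieee_ets from <net/dcbnl.h> (field values chosen purely for illustration), with one pair of ETS TCs whose weights sum to exactly 100 and everything else left strict:

struct ieee_ets ets = {
	/* Unset entries default to 0 == IEEE_8021QAZ_TSA_STRICT. */
	.tc_tsa   = { [1] = IEEE_8021QAZ_TSA_ETS, [2] = IEEE_8021QAZ_TSA_ETS },
	.tc_tx_bw = { [1] = 60, [2] = 40 },	/* ETS weights sum to 100 */
	.prio_tc  = { 0, 0, 1, 1, 2, 2, 0, 0 },	/* all < IEEE_8021QAZ_MAX_TCS */
};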
+
+static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 *prio_tc)
+{
+ char pptb_pl[MLXSW_REG_PPTB_LEN];
+ int i;
+
+ mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
+ pptb_pl);
+}
+
+static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg)
+{
+ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ if (prio_tc[i] == pg)
+ return true;
+ return false;
+}
+
+static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 *old_prio_tc, u8 *new_prio_tc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ int err, i;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ u8 pg = old_prio_tc[i];
+
+ if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg))
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0);
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+}
+
+static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* Create the required PGs, but don't destroy existing ones, as
+ * traffic is still directed to them.
+ */
+ err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+ ets->prio_tc, pause_en,
+ mlxsw_sp_port->dcb.pfc);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc);
+ if (err) {
+ netdev_err(dev, "Failed to set PG-priority mapping\n");
+ goto err_port_prio_pg_map;
+ }
+
+ err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc,
+ ets->prio_tc);
+ if (err)
+		netdev_warn(dev, "Failed to remove unused PGs\n");
+
+ return 0;
+
+err_port_prio_pg_map:
+ mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc);
+ return err;
+}
+
+static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int i, err;
+
+ /* Egress configuration. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ bool dwrr = ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
+ u8 weight = ets->tc_tx_bw[i];
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ 0, dwrr, weight);
+ if (err) {
+ netdev_err(dev, "Failed to link subgroup ETS element %d to group\n",
+ i);
+ goto err_port_ets_set;
+ }
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+ ets->prio_tc[i]);
+ if (err) {
+ netdev_err(dev, "Failed to map prio %d to TC %d\n", i,
+ ets->prio_tc[i]);
+ goto err_port_prio_tc_set;
+ }
+ }
+
+ /* Ingress configuration. */
+ err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, ets);
+ if (err)
+ goto err_port_headroom_set;
+
+ return 0;
+
+err_port_headroom_set:
+ i = IEEE_8021QAZ_MAX_TCS;
+err_port_prio_tc_set:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, my_ets->prio_tc[i]);
+ i = IEEE_8021QAZ_MAX_TCS;
+err_port_ets_set:
+ for (i--; i >= 0; i--) {
+ bool dwrr = my_ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
+ u8 weight = my_ets->tc_tx_bw[i];
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ 0, dwrr, weight);
+ }
+ return err;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_port_ets_validate(mlxsw_sp_port, ets);
+ if (err)
+ return err;
+
+ err = __mlxsw_sp_dcbnl_ieee_setets(mlxsw_sp_port, ets);
+ if (err)
+ return err;
+
+ memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(maxrate, mlxsw_sp_port->dcb.maxrate, sizeof(*maxrate));
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct ieee_maxrate *my_maxrate = mlxsw_sp_port->dcb.maxrate;
+ int err, i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ i, 0,
+ maxrate->tc_maxrate[i]);
+ if (err) {
+ netdev_err(dev, "Failed to set maxrate for TC %d\n", i);
+ goto err_port_ets_maxrate_set;
+ }
+ }
+
+ memcpy(mlxsw_sp_port->dcb.maxrate, maxrate, sizeof(*maxrate));
+
+ return 0;
+
+err_port_ets_maxrate_set:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ i, 0, my_maxrate->tc_maxrate[i]);
+ return err;
+}
+
+static int mlxsw_sp_port_pfc_cnt_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 prio)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct ieee_pfc *my_pfc = mlxsw_sp_port->dcb.pfc;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
+ MLXSW_REG_PPCNT_PRIO_CNT, prio);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ if (err)
+ return err;
+
+ my_pfc->requests[prio] = mlxsw_reg_ppcnt_tx_pause_get(ppcnt_pl);
+ my_pfc->indications[prio] = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl);
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err, i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_pfc_cnt_get(mlxsw_sp_port, i);
+ if (err) {
+ netdev_err(dev, "Failed to get PFC count for priority %d\n",
+ i);
+ return err;
+ }
+ }
+
+ memcpy(pfc, mlxsw_sp_port->dcb.pfc, sizeof(*pfc));
+
+ return 0;
+}
+
+static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_pfc *pfc)
+{
+ char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+ mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+ pfcc_pl);
+}
+
+static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
+ pfc->pfc_en) {
+ netdev_err(dev, "PAUSE frames already enabled on port\n");
+ return -EINVAL;
+ }
+
+ err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+ mlxsw_sp_port->dcb.ets->prio_tc,
+ false, pfc);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom for PFC\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pfc_set(mlxsw_sp_port, pfc);
+ if (err) {
+ netdev_err(dev, "Failed to configure PFC\n");
+ goto err_port_pfc_set;
+ }
+
+ memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+
+err_port_pfc_set:
+ __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+ mlxsw_sp_port->dcb.ets->prio_tc, false,
+ mlxsw_sp_port->dcb.pfc);
+ return err;
+}
+
+static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
+ .ieee_getets = mlxsw_sp_dcbnl_ieee_getets,
+ .ieee_setets = mlxsw_sp_dcbnl_ieee_setets,
+ .ieee_getmaxrate = mlxsw_sp_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate,
+ .ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc,
+
+ .getdcbx = mlxsw_sp_dcbnl_getdcbx,
+ .setdcbx = mlxsw_sp_dcbnl_setdcbx,
+};
+
+static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dcb.ets = kzalloc(sizeof(*mlxsw_sp_port->dcb.ets),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.ets)
+ return -ENOMEM;
+
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_ets_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.ets);
+}
+
+static int mlxsw_sp_port_maxrate_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int i;
+
+ mlxsw_sp_port->dcb.maxrate = kmalloc(sizeof(*mlxsw_sp_port->dcb.maxrate),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.maxrate)
+ return -ENOMEM;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port->dcb.maxrate->tc_maxrate[i] = MLXSW_REG_QEEC_MAS_DIS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_maxrate_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.maxrate);
+}
+
+static int mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dcb.pfc = kzalloc(sizeof(*mlxsw_sp_port->dcb.pfc),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.pfc)
+ return -ENOMEM;
+
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_pfc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.pfc);
+}
+
+int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_maxrate_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_maxrate_init;
+ err = mlxsw_sp_port_pfc_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_pfc_init;
+
+ mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;
+
+ return 0;
+
+err_port_pfc_init:
+ mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
+err_port_maxrate_init:
+ mlxsw_sp_port_ets_fini(mlxsw_sp_port);
+ return err;
+}
+
+void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port_pfc_fini(mlxsw_sp_port);
+ mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
+ mlxsw_sp_port_ets_fini(mlxsw_sp_port);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 9cd6f4722..3710f19ed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1438,8 +1438,8 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
- schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
- msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+ mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
+ msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 7a60a2675..25f658b38 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -43,7 +43,6 @@
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
-#include <net/devlink.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -75,11 +74,11 @@ struct mlxsw_sx_port_pcpu_stats {
};
struct mlxsw_sx_port {
+ struct mlxsw_core_port core_port; /* must be first */
struct net_device *dev;
struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sx *mlxsw_sx;
u8 local_port;
- struct devlink_port devlink_port;
};
/* tx_hdr_version
@@ -303,7 +302,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
return NETDEV_TX_BUSY;
if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
@@ -317,11 +316,14 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
}
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
- len = skb->len;
+	/* The TX header is consumed by the HW on the way out, so its
+	 * bytes should not be counted as sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
@@ -518,7 +520,8 @@ static void mlxsw_sx_port_get_stats(struct net_device *dev,
int i;
int err;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
+ MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
@@ -955,9 +958,7 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sx->core);
struct mlxsw_sx_port *mlxsw_sx_port;
- struct devlink_port *devlink_port;
struct net_device *dev;
bool usable;
int err;
@@ -1011,14 +1012,6 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto port_not_usable;
}
- devlink_port = &mlxsw_sx_port->devlink_port;
- err = devlink_port_register(devlink, devlink_port, local_port);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n",
- mlxsw_sx_port->local_port);
- goto err_devlink_port_register;
- }
-
err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1076,11 +1069,19 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto err_register_netdev;
}
- devlink_port_type_eth_set(devlink_port, dev);
+ err = mlxsw_core_port_init(mlxsw_sx->core, &mlxsw_sx_port->core_port,
+ mlxsw_sx_port->local_port, dev, false, 0);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
+ mlxsw_sx_port->local_port);
+ goto err_core_port_init;
+ }
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
+err_core_port_init:
+ unregister_netdev(dev);
err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
@@ -1089,8 +1090,6 @@ err_port_mtu_set:
err_port_speed_set:
err_port_swid_set:
err_port_system_port_mapping_set:
- devlink_port_unregister(&mlxsw_sx_port->devlink_port);
-err_devlink_port_register:
port_not_usable:
err_port_module_check:
err_dev_addr_get:
@@ -1103,15 +1102,12 @@ err_alloc_stats:
static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
- struct devlink_port *devlink_port;
if (!mlxsw_sx_port)
return;
- devlink_port = &mlxsw_sx_port->devlink_port;
- devlink_port_type_clear(devlink_port);
+ mlxsw_core_port_fini(&mlxsw_sx_port->core_port);
unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
- devlink_port_unregister(devlink_port);
free_percpu(mlxsw_sx_port->pcpu_stats);
free_netdev(mlxsw_sx_port->dev);
}
@@ -1454,10 +1450,10 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
}
-static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
- struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
int err;
mlxsw_sx->core = mlxsw_core;
@@ -1504,9 +1500,9 @@ err_event_register:
return err;
}
-static void mlxsw_sx_fini(void *priv)
+static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
{
- struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sx_traps_fini(mlxsw_sx);
mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index a8522d8af..20cb85bc0 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1354,6 +1354,7 @@ ks8695_probe(struct platform_device *pdev)
struct resource *rxirq_res, *txirq_res, *linkirq_res;
int ret = 0;
int buff_n;
+ bool inv_mac_addr = false;
u32 machigh, maclow;
/* Initialise a net_device */
@@ -1456,8 +1457,7 @@ ks8695_probe(struct platform_device *pdev)
ndev->dev_addr[5] = maclow & 0xFF;
if (!is_valid_ether_addr(ndev->dev_addr))
- dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", ndev->name);
+ inv_mac_addr = true;
/* In order to be efficient memory-wise, we allocate both
* rings in one go.
@@ -1520,6 +1520,9 @@ ks8695_probe(struct platform_device *pdev)
ret = register_netdev(ndev);
if (ret == 0) {
+ if (inv_mac_addr)
+ dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
+ ndev->name);
dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
ks8695_port_type(ksp), ndev->dev_addr);
} else {
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 75dc46c5f..280e761d3 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4790,7 +4790,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
/* Notify the network subsystem that the packet has been sent. */
if (dev)
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
/**
@@ -4965,7 +4965,7 @@ static void netdev_tx_timeout(struct net_device *dev)
hw_ena_intr(hw);
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
}
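The dev->trans_start conversions here and in the drivers below swap an open-coded jiffies store for the netif_trans_update() helper. Roughly, paraphrasing include/linux/netdevice.h of this kernel (treat the body as a sketch), the helper stamps TX queue 0 and skips the write when the value is already current:

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;	/* avoid a redundant store */
}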
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 86ea17e7b..0a26b11ca 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -28,11 +28,12 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
+#include <linux/of_net.h>
#include "enc28j60_hw.h"
#define DRV_NAME "enc28j60"
-#define DRV_VERSION "1.01"
+#define DRV_VERSION "1.02"
#define SPI_OPLEN 1
@@ -89,22 +90,26 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
{
u8 *rx_buf = priv->spi_transfer_buf + 4;
u8 *tx_buf = priv->spi_transfer_buf;
- struct spi_transfer t = {
+ struct spi_transfer tx = {
.tx_buf = tx_buf,
+ .len = SPI_OPLEN,
+ };
+ struct spi_transfer rx = {
.rx_buf = rx_buf,
- .len = SPI_OPLEN + len,
+ .len = len,
};
struct spi_message msg;
int ret;
tx_buf[0] = ENC28J60_READ_BUF_MEM;
- tx_buf[1] = tx_buf[2] = tx_buf[3] = 0; /* don't care */
spi_message_init(&msg);
- spi_message_add_tail(&t, &msg);
+ spi_message_add_tail(&tx, &msg);
+ spi_message_add_tail(&rx, &msg);
+
ret = spi_sync(priv->spi, &msg);
if (ret == 0) {
- memcpy(data, &rx_buf[SPI_OPLEN], len);
+ memcpy(data, rx_buf, len);
ret = msg.status;
}
if (ret && netif_msg_drv(priv))
@@ -1146,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_phy_read(priv, PHIR);
}
/* TX complete handler */
- if ((intflags & EIR_TXIF) != 0) {
+ if (((intflags & EIR_TXIF) != 0) &&
+ ((intflags & EIR_TXERIF) == 0)) {
bool err = false;
loop++;
if (netif_msg_intr(priv))
@@ -1198,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_tx_clear(ndev, true);
} else
enc28j60_tx_clear(ndev, true);
- locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+ locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
}
/* RX Error handler */
if ((intflags & EIR_RXERIF) != 0) {
@@ -1233,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
+ BUG_ON(!priv->tx_skb);
+
if (netif_msg_tx_queued(priv))
printk(KERN_DEBUG DRV_NAME
": Tx Packet Len:%d\n", priv->tx_skb->len);
@@ -1544,6 +1552,7 @@ static int enc28j60_probe(struct spi_device *spi)
{
struct net_device *dev;
struct enc28j60_net *priv;
+ const void *macaddr;
int ret = 0;
if (netif_msg_drv(&debug))
@@ -1575,7 +1584,12 @@ static int enc28j60_probe(struct spi_device *spi)
ret = -EIO;
goto error_irq;
}
- eth_hw_addr_random(dev);
+
+ macaddr = of_get_mac_address(spi->dev.of_node);
+ if (macaddr)
+ ether_addr_copy(dev->dev_addr, macaddr);
+ else
+ eth_hw_addr_random(dev);
enc28j60_set_hw_macaddr(dev);
/* Board setup must set the relevant edge trigger type;
@@ -1630,9 +1644,16 @@ static int enc28j60_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id enc28j60_dt_ids[] = {
+ { .compatible = "microchip,enc28j60" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, enc28j60_dt_ids);
+
static struct spi_driver enc28j60_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .of_match_table = enc28j60_dt_ids,
},
.probe = enc28j60_probe,
.remove = enc28j60_remove,
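Two usability notes on the enc28j60 changes above: the of_device_id table lets boards probe the chip from devicetree with compatible = "microchip,enc28j60", and the of_get_mac_address() call in probe picks up a fixed MAC from the node's local-mac-address (or mac-address) property, falling back to a random address as before when none is given.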
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 7df318346..42e34076d 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -874,7 +874,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* save the timestamp */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* Remember the skb for deferred processing */
priv->tx_skb = skb;
@@ -890,7 +890,7 @@ static void encx24j600_tx_timeout(struct net_device *dev)
struct encx24j600_priv *priv = netdev_priv(dev);
netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
- jiffies, jiffies - dev->trans_start);
+ jiffies, jiffies - dev_trans_start(dev));
dev->stats.tx_errors++;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 3e67f451f..4367dd687 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -376,7 +376,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->tx_head = TX_NEXT(tx_head);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
ret = NETDEV_TX_OK;
out_unlock:
spin_unlock_irq(&priv->txlock);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 122c2ee3d..ed89029ff 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -1904,7 +1904,7 @@ static void ns_tx_timeout(struct net_device *dev)
spin_unlock_irq(&np->lock);
enable_irq(irq);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 1bd419dbd..612c7a44b 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
sonic_init(dev);
lp->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9ba975853..2874dffe7 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4021,7 +4021,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags = 0;
u16 vlan_tag = 0;
struct fifo_info *fifo = NULL;
- int do_spin_lock = 1;
int offload_type;
int enable_per_list_interrupt = 0;
struct config_param *config = &sp->config;
@@ -4074,7 +4073,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
queue += sp->udp_fifo_idx;
if (skb->len > 1024)
enable_per_list_interrupt = 1;
- do_spin_lock = 0;
}
}
}
@@ -4084,12 +4082,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
- if (do_spin_lock)
- spin_lock_irqsave(&fifo->tx_lock, flags);
- else {
- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&fifo->tx_lock, flags);
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 75683fb26..e744acc18 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -59,8 +59,8 @@
netdev_warn((nn)->netdev, fmt, ## args); \
} while (0)
-/* Max time to wait for NFP to respond on updates (in ms) */
-#define NFP_NET_POLL_TIMEOUT 5000
+/* Max time to wait for NFP to respond on updates (in seconds) */
+#define NFP_NET_POLL_TIMEOUT 5
/* Bar allocation */
#define NFP_NET_CRTL_BAR 0
@@ -298,6 +298,8 @@ struct nfp_net_rx_buf {
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
+ * @bufsz: Buffer allocation size for convenience of management routines
+ * (NOTE: this is in second cache line, do not use on fast path!)
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -319,6 +321,7 @@ struct nfp_net_rx_ring {
dma_addr_t dma;
unsigned int size;
+ unsigned int bufsz;
} ____cacheline_aligned;
/**
@@ -444,6 +447,10 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @shared_name: Name for shared interrupt
* @me_freq_mhz: ME clock_freq (MHz)
* @reconfig_lock: Protects HW reconfiguration request regs/machinery
+ * @reconfig_posted: Pending reconfig bits coming from async sources
+ * @reconfig_timer_active: Timer for reading reconfiguration results is pending
+ * @reconfig_sync_present: Some thread is performing synchronous reconfig
+ * @reconfig_timer: Timer for async reading of reconfig results
* @link_up: Is the link up?
* @link_status_lock: Protects @link_up and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -472,6 +479,9 @@ struct nfp_net {
u32 rx_offset;
+ struct nfp_net_tx_ring *tx_rings;
+ struct nfp_net_rx_ring *rx_rings;
+
#ifdef CONFIG_PCI_IOV
unsigned int num_vfs;
struct vf_data_storage *vfinfo;
@@ -504,9 +514,6 @@ struct nfp_net {
int txd_cnt;
int rxd_cnt;
- struct nfp_net_tx_ring tx_rings[NFP_NET_MAX_TX_RINGS];
- struct nfp_net_rx_ring rx_rings[NFP_NET_MAX_RX_RINGS];
-
u8 num_irqs;
u8 num_r_vecs;
struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS];
@@ -528,6 +535,10 @@ struct nfp_net {
spinlock_t link_status_lock;
spinlock_t reconfig_lock;
+ u32 reconfig_posted;
+ bool reconfig_timer_active;
+ bool reconfig_sync_present;
+ struct timer_list reconfig_timer;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
@@ -721,6 +732,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_irqs_alloc(struct nfp_net *nn);
void nfp_net_irqs_disable(struct nfp_net *nn);
+int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt);
#ifdef CONFIG_NFP_NET_DEBUG
void nfp_net_debugfs_create(void);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 43c618baf..ba26bb356 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -80,6 +80,116 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
put_unaligned_le32(reg, fw_ver);
}
+/* Firmware reconfig
+ *
+ * Firmware reconfig may take a while so we have two versions of it -
+ * synchronous and asynchronous (posted). All synchronous callers are holding
+ * RTNL so we don't have to worry about serializing them.
+ */
+static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
+{
+ nn_writel(nn, NFP_NET_CFG_UPDATE, update);
+ /* ensure update is written before pinging HW */
+ nn_pci_flush(nn);
+ nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+}
+
+/* Pass 0 as update to run posted reconfigs. */
+static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
+{
+ update |= nn->reconfig_posted;
+ nn->reconfig_posted = 0;
+
+ nfp_net_reconfig_start(nn, update);
+
+ nn->reconfig_timer_active = true;
+ mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
+}
+
+static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
+{
+ u32 reg;
+
+ reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
+ if (reg == 0)
+ return true;
+ if (reg & NFP_NET_CFG_UPDATE_ERR) {
+ nn_err(nn, "Reconfig error: 0x%08x\n", reg);
+ return true;
+ } else if (last_check) {
+ nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
+ return true;
+ }
+
+ return false;
+}
+
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+ bool timed_out = false;
+
+ /* Poll update field, waiting for NFP to ack the config */
+ while (!nfp_net_reconfig_check_done(nn, timed_out)) {
+ msleep(1);
+ timed_out = time_is_before_eq_jiffies(deadline);
+ }
+
+ if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
+ return -EIO;
+
+ return timed_out ? -EIO : 0;
+}
+
+static void nfp_net_reconfig_timer(unsigned long data)
+{
+ struct nfp_net *nn = (void *)data;
+
+ spin_lock_bh(&nn->reconfig_lock);
+
+ nn->reconfig_timer_active = false;
+
+ /* If sync caller is present it will take over from us */
+ if (nn->reconfig_sync_present)
+ goto done;
+
+ /* Read reconfig status and report errors */
+ nfp_net_reconfig_check_done(nn, true);
+
+ if (nn->reconfig_posted)
+ nfp_net_reconfig_start_async(nn, 0);
+done:
+ spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig_post() - Post async reconfig request
+ * @nn: NFP Net device to reconfigure
+ * @update: The value for the update field in the BAR config
+ *
+ * Record FW reconfiguration request. Reconfiguration will be kicked off
+ * whenever reconfiguration machinery is idle. Multiple requests can be
+ * merged together!
+ */
+static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
+{
+ spin_lock_bh(&nn->reconfig_lock);
+
+ /* Sync caller will kick off async reconf when it's done, just post */
+ if (nn->reconfig_sync_present) {
+ nn->reconfig_posted |= update;
+ goto done;
+ }
+
+ /* Opportunistically check if the previous command is done */
+ if (!nn->reconfig_timer_active ||
+ nfp_net_reconfig_check_done(nn, false))
+ nfp_net_reconfig_start_async(nn, update);
+ else
+ nn->reconfig_posted |= update;
+done:
+ spin_unlock_bh(&nn->reconfig_lock);
+}
+
/**
* nfp_net_reconfig() - Reconfigure the firmware
* @nn: NFP Net device to reconfigure
@@ -93,35 +203,45 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
*/
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
- int cnt, ret = 0;
- u32 new;
+ bool cancelled_timer = false;
+ u32 pre_posted_requests;
+ int ret;
spin_lock_bh(&nn->reconfig_lock);
- nn_writel(nn, NFP_NET_CFG_UPDATE, update);
- /* ensure update is written before pinging HW */
- nn_pci_flush(nn);
- nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+ nn->reconfig_sync_present = true;
- /* Poll update field, waiting for NFP to ack the config */
- for (cnt = 0; ; cnt++) {
- new = nn_readl(nn, NFP_NET_CFG_UPDATE);
- if (new == 0)
- break;
- if (new & NFP_NET_CFG_UPDATE_ERR) {
- nn_err(nn, "Reconfig error: 0x%08x\n", new);
- ret = -EIO;
- break;
- } else if (cnt >= NFP_NET_POLL_TIMEOUT) {
- nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n",
- update, cnt);
- ret = -EIO;
- break;
- }
- mdelay(1);
+ if (nn->reconfig_timer_active) {
+ del_timer(&nn->reconfig_timer);
+ nn->reconfig_timer_active = false;
+ cancelled_timer = true;
+ }
+ pre_posted_requests = nn->reconfig_posted;
+ nn->reconfig_posted = 0;
+
+ spin_unlock_bh(&nn->reconfig_lock);
+
+ if (cancelled_timer)
+ nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+
+ /* Run the posted reconfigs which were issued before we started */
+ if (pre_posted_requests) {
+ nfp_net_reconfig_start(nn, pre_posted_requests);
+ nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
}
+ nfp_net_reconfig_start(nn, update);
+ ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+
+ spin_lock_bh(&nn->reconfig_lock);
+
+ if (nn->reconfig_posted)
+ nfp_net_reconfig_start_async(nn, 0);
+
+ nn->reconfig_sync_present = false;
+
spin_unlock_bh(&nn->reconfig_lock);
+
return ret;
}
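To summarize the protocol implemented above: the synchronous caller flags itself via reconfig_sync_present, cancels a pending async timer and adopts its deadline, first drains any requests that were posted before it started, then runs its own update, and on the way out re-arms the async machinery for anything posted while it was busy. The posted path in nfp_net_reconfig_post() never sleeps; it only writes the BAR and arms a timer, which is what makes it safe to call where blocking is not an option.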
@@ -347,12 +467,18 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data)
/**
* nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
* @tx_ring: TX ring structure
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
*/
-static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring)
+static void
+nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_r_vector *r_vec, unsigned int idx)
{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
+ tx_ring->idx = idx;
+ tx_ring->r_vec = r_vec;
+
tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}
@@ -360,12 +486,18 @@ static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
* @rx_ring: RX ring structure
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
*/
-static void nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring)
+static void
+nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_r_vector *r_vec, unsigned int idx)
{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
+ rx_ring->idx = idx;
+ rx_ring->r_vec = r_vec;
+
rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
@@ -401,16 +533,6 @@ static void nfp_net_irqs_assign(struct net_device *netdev)
r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
cpumask_set_cpu(r, &r_vec->affinity_mask);
-
- r_vec->tx_ring = &nn->tx_rings[r];
- nn->tx_rings[r].idx = r;
- nn->tx_rings[r].r_vec = r_vec;
- nfp_net_tx_ring_init(r_vec->tx_ring);
-
- r_vec->rx_ring = &nn->rx_rings[r];
- nn->rx_rings[r].idx = r;
- nn->rx_rings[r].r_vec = r_vec;
- nfp_net_rx_ring_init(r_vec->rx_ring);
}
}
@@ -865,61 +987,59 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
}
/**
- * nfp_net_tx_flush() - Free any untransmitted buffers currently on the TX ring
- * @tx_ring: TX ring structure
+ * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
+ * @nn: NFP Net device
+ * @tx_ring: TX ring structure
*
* Assumes that the device is stopped
*/
-static void nfp_net_tx_flush(struct nfp_net_tx_ring *tx_ring)
+static void
+nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
const struct skb_frag_struct *frag;
struct netdev_queue *nd_q;
- struct sk_buff *skb;
- int nr_frags;
- int fidx;
- int idx;
+ struct pci_dev *pdev = nn->pdev;
while (tx_ring->rd_p != tx_ring->wr_p) {
- idx = tx_ring->rd_p % tx_ring->cnt;
+ int nr_frags, fidx, idx;
+ struct sk_buff *skb;
+ idx = tx_ring->rd_p % tx_ring->cnt;
skb = tx_ring->txbufs[idx].skb;
- if (skb) {
- nr_frags = skb_shinfo(skb)->nr_frags;
- fidx = tx_ring->txbufs[idx].fidx;
-
- if (fidx == -1) {
- /* unmap head */
- dma_unmap_single(&pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
- skb_headlen(skb),
- DMA_TO_DEVICE);
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(&pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- }
-
- /* check for last gather fragment */
- if (fidx == nr_frags - 1)
- dev_kfree_skb_any(skb);
-
- tx_ring->txbufs[idx].dma_addr = 0;
- tx_ring->txbufs[idx].skb = NULL;
- tx_ring->txbufs[idx].fidx = -2;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ fidx = tx_ring->txbufs[idx].fidx;
+
+ if (fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(&pdev->dev,
+ tx_ring->txbufs[idx].dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[fidx];
+ dma_unmap_page(&pdev->dev,
+ tx_ring->txbufs[idx].dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
- memset(&tx_ring->txds[idx], 0, sizeof(tx_ring->txds[idx]));
+ /* check for last gather fragment */
+ if (fidx == nr_frags - 1)
+ dev_kfree_skb_any(skb);
+
+ tx_ring->txbufs[idx].dma_addr = 0;
+ tx_ring->txbufs[idx].skb = NULL;
+ tx_ring->txbufs[idx].fidx = -2;
tx_ring->qcp_rd_p++;
tx_ring->rd_p++;
}
+ memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
+ tx_ring->wr_p = 0;
+ tx_ring->rd_p = 0;
+ tx_ring->qcp_rd_p = 0;
+ tx_ring->wr_ptr_add = 0;
+
nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
@@ -957,25 +1077,27 @@ static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
* nfp_net_rx_alloc_one() - Allocate and map skb for RX
* @rx_ring: RX ring structure of the skb
* @dma_addr: Pointer to storage for DMA address (output param)
+ * @fl_bufsz: size of freelist buffers
*
 * This function will allocate a new skb and map it for DMA.
*
* Return: allocated skb or NULL on failure.
*/
static struct sk_buff *
-nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr)
+nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
+ unsigned int fl_bufsz)
{
struct nfp_net *nn = rx_ring->r_vec->nfp_net;
struct sk_buff *skb;
- skb = netdev_alloc_skb(nn->netdev, nn->fl_bufsz);
+ skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
if (!skb) {
nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
return NULL;
}
*dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
- nn->fl_bufsz, DMA_FROM_DEVICE);
+ fl_bufsz, DMA_FROM_DEVICE);
if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
dev_kfree_skb_any(skb);
nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
@@ -1020,62 +1142,101 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
}
/**
- * nfp_net_rx_flush() - Free any buffers currently on the RX ring
- * @rx_ring: RX ring to remove buffers from
+ * nfp_net_rx_ring_reset() - Reflect the freelist state in SW after device disable
+ * @rx_ring: RX ring structure
*
- * Assumes that the device is stopped
+ * Warning: Do *not* call if ring buffers were never put on the FW freelist
+ * (i.e. device was not enabled)!
*/
-static void nfp_net_rx_flush(struct nfp_net_rx_ring *rx_ring)
+static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
- struct nfp_net *nn = rx_ring->r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
- int idx;
+ unsigned int wr_idx, last_idx;
- while (rx_ring->rd_p != rx_ring->wr_p) {
- idx = rx_ring->rd_p % rx_ring->cnt;
+ /* Move the empty entry to the end of the list */
+ wr_idx = rx_ring->wr_p % rx_ring->cnt;
+ last_idx = rx_ring->cnt - 1;
+ rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
+ rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
+ rx_ring->rxbufs[last_idx].dma_addr = 0;
+ rx_ring->rxbufs[last_idx].skb = NULL;
- if (rx_ring->rxbufs[idx].skb) {
- dma_unmap_single(&pdev->dev,
- rx_ring->rxbufs[idx].dma_addr,
- nn->fl_bufsz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_ring->rxbufs[idx].skb);
- rx_ring->rxbufs[idx].dma_addr = 0;
- rx_ring->rxbufs[idx].skb = NULL;
- }
+ memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
+ rx_ring->wr_p = 0;
+ rx_ring->rd_p = 0;
+ rx_ring->wr_ptr_add = 0;
+}
+
+/**
+ * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
+ * @nn: NFP Net device
+ * @rx_ring: RX ring to remove buffers from
+ *
+ * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
+ * entries. After device is disabled nfp_net_rx_ring_reset() must be called
+ * to restore required ring geometry.
+ */
+static void
+nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = nn->pdev;
+ unsigned int i;
- memset(&rx_ring->rxds[idx], 0, sizeof(rx_ring->rxds[idx]));
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+		/* A NULL skb can only happen when the initial filling of
+		 * the ring fails to allocate enough buffers; this path is
+		 * then used to free the ones already allocated.
+ */
+ if (!rx_ring->rxbufs[i].skb)
+ continue;
- rx_ring->rd_p++;
+ dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
+ rx_ring->bufsz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
+ rx_ring->rxbufs[i].dma_addr = 0;
+ rx_ring->rxbufs[i].skb = NULL;
}
}
/**
- * nfp_net_rx_fill_freelist() - Attempt filling freelist with RX buffers
- * @rx_ring: RX ring to fill
- *
- * Try to fill as many buffers as possible into freelist. Return
- * number of buffers added.
- *
- * Return: Number of freelist buffers added.
+ * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
+ * @nn: NFP Net device
+ * @rx_ring: RX ring to fill with buffers
*/
-static int nfp_net_rx_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+static int
+nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
{
- struct sk_buff *skb;
- dma_addr_t dma_addr;
+ struct nfp_net_rx_buf *rxbufs;
+ unsigned int i;
+
+ rxbufs = rx_ring->rxbufs;
- while (nfp_net_rx_space(rx_ring)) {
- skb = nfp_net_rx_alloc_one(rx_ring, &dma_addr);
- if (!skb) {
- nfp_net_rx_flush(rx_ring);
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ rxbufs[i].skb =
+ nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
+ rx_ring->bufsz);
+ if (!rxbufs[i].skb) {
+ nfp_net_rx_ring_bufs_free(nn, rx_ring);
return -ENOMEM;
}
- nfp_net_rx_give_one(rx_ring, skb, dma_addr);
}
return 0;
}
/**
+ * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @rx_ring: RX ring to fill
+ */
+static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
+ rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
* nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
* @flags: RX descriptor flags field in CPU byte order
*/
@@ -1240,7 +1401,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb = rx_ring->rxbufs[idx].skb;
- new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr);
+ new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
+ nn->fl_bufsz);
if (!new_skb) {
nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
rx_ring->rxbufs[idx].dma_addr);
@@ -1256,23 +1418,25 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
+ /* < meta_len >
+ * <-- [rx_offset] -->
+ * ---------------------------------------------------------
+ * | [XX] | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ * <---------------- data_len --------------->
+ *
+ * The rx_offset is fixed for all packets, the meta_len can vary
+ * on a packet by packet basis. If rx_offset is set to zero
+ * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+ * buffer and is immediately followed by the packet (no [XX]).
+ */
meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
data_len = le16_to_cpu(rxd->rxd.data_len);
- if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) {
- /* The packet data starts after the metadata */
+ if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
skb_reserve(skb, meta_len);
- } else {
- /* The packet data starts at a fixed offset */
+ else
skb_reserve(skb, nn->rx_offset);
- }
-
- /* Adjust the SKB for the dynamic meta data pre-pended */
skb_put(skb, data_len - meta_len);
nfp_net_set_hash(nn->netdev, skb, rxd);
@@ -1349,10 +1513,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), 0);
-
kfree(tx_ring->txbufs);
if (tx_ring->txds)
@@ -1360,11 +1520,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
tx_ring->txds, tx_ring->dma);
tx_ring->cnt = 0;
- tx_ring->wr_p = 0;
- tx_ring->rd_p = 0;
- tx_ring->qcp_rd_p = 0;
- tx_ring->wr_ptr_add = 0;
-
tx_ring->txbufs = NULL;
tx_ring->txds = NULL;
tx_ring->dma = 0;
@@ -1374,17 +1529,18 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
* @tx_ring: TX Ring structure to allocate
+ * @cnt: Ring buffer count
*
* Return: 0 on success, negative errno otherwise.
*/
-static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring)
+static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
int sz;
- tx_ring->cnt = nn->txd_cnt;
+ tx_ring->cnt = cnt;
tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
@@ -1397,11 +1553,6 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring)
if (!tx_ring->txbufs)
goto err_alloc;
- /* Write the DMA address, size and MSI-X info to the device */
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), tx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), ilog2(tx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), r_vec->irq_idx);
-
netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
@@ -1415,6 +1566,59 @@ err_alloc:
return -ENOMEM;
}
+static struct nfp_net_tx_ring *
+nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
+{
+ struct nfp_net_tx_ring *rings;
+ unsigned int r;
+
+ rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
+ return NULL;
+
+ for (r = 0; r < nn->num_tx_rings; r++) {
+ nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
+
+ if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
+ goto err_free_prev;
+ }
+
+ return rings;
+
+err_free_prev:
+ while (r--)
+ nfp_net_tx_ring_free(&rings[r]);
+ kfree(rings);
+ return NULL;
+}
+
+static struct nfp_net_tx_ring *
+nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+{
+ struct nfp_net_tx_ring *old = nn->tx_rings;
+ unsigned int r;
+
+ for (r = 0; r < nn->num_tx_rings; r++)
+ old[r].r_vec->tx_ring = &rings[r];
+
+ nn->tx_rings = rings;
+ return old;
+}
+
+static void
+nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+{
+ unsigned int r;
+
+ if (!rings)
+ return;
+
+ for (r = 0; r < nn->num_tx_rings; r++)
+ nfp_net_tx_ring_free(&rings[r]);
+
+ kfree(rings);
+}
+
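[Editor's note: a design point on the shadow-ring helpers above: nfp_net_shadow_tx_rings_swap() returns the previously installed array, so calling it a second time restores the original configuration; the error paths in nfp_net_set_ring_size() further down rely on exactly this. Stripped down to plain pointers, the idiom looks like the sketch below (names are illustrative).]

	/* Swap in a new resource set and hand back the old one; doing
	 * it twice is a rollback.
	 */
	static void *swap_in(void **active, void *fresh)
	{
		void *old = *active;

		*active = fresh;
		return old;
	}

	int main(void)
	{
		int set_a = 0, set_b = 1;
		void *active = &set_a, *old;

		old = swap_in(&active, &set_b); /* try the new set */
		/* on failure: */
		old = swap_in(&active, old);    /* roll back; 'old' now
						 * names the set to free */
		(void)old;
		return 0;
	}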
/**
* nfp_net_rx_ring_free() - Free resources allocated to a RX ring
* @rx_ring: RX ring to free
@@ -1425,10 +1629,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), 0);
-
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
@@ -1436,10 +1636,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
rx_ring->rxds, rx_ring->dma);
rx_ring->cnt = 0;
- rx_ring->wr_p = 0;
- rx_ring->rd_p = 0;
- rx_ring->wr_ptr_add = 0;
-
rx_ring->rxbufs = NULL;
rx_ring->rxds = NULL;
rx_ring->dma = 0;
@@ -1449,17 +1645,22 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
/**
* nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
* @rx_ring: RX ring to allocate
+ * @fl_bufsz: Size of buffers to allocate
+ * @cnt: Ring buffer count
*
* Return: 0 on success, negative errno otherwise.
*/
-static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring)
+static int
+nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
+ u32 cnt)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
int sz;
- rx_ring->cnt = nn->rxd_cnt;
+ rx_ring->cnt = cnt;
+ rx_ring->bufsz = fl_bufsz;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
@@ -1472,11 +1673,6 @@ static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring)
if (!rx_ring->rxbufs)
goto err_alloc;
- /* Write the DMA address, size and MSI-X info to the device */
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), rx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), ilog2(rx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), r_vec->irq_idx);
-
nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
@@ -1488,91 +1684,109 @@ err_alloc:
return -ENOMEM;
}
-static void __nfp_net_free_rings(struct nfp_net *nn, unsigned int n_free)
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
+ u32 buf_cnt)
{
- struct nfp_net_r_vector *r_vec;
- struct msix_entry *entry;
+ struct nfp_net_rx_ring *rings;
+ unsigned int r;
- while (n_free--) {
- r_vec = &nn->r_vecs[n_free];
- entry = &nn->irq_entries[r_vec->irq_idx];
+ rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
+ return NULL;
- nfp_net_rx_ring_free(r_vec->rx_ring);
- nfp_net_tx_ring_free(r_vec->tx_ring);
+ for (r = 0; r < nn->num_rx_rings; r++) {
+ nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
- irq_set_affinity_hint(entry->vector, NULL);
- free_irq(entry->vector, r_vec);
+ if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
+ goto err_free_prev;
- netif_napi_del(&r_vec->napi);
+ if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
+ goto err_free_ring;
}
+
+ return rings;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+err_free_ring:
+ nfp_net_rx_ring_free(&rings[r]);
+ }
+ kfree(rings);
+ return NULL;
}
-/**
- * nfp_net_free_rings() - Free all ring resources
- * @nn: NFP Net device to reconfigure
- */
-static void nfp_net_free_rings(struct nfp_net *nn)
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
{
- __nfp_net_free_rings(nn, nn->num_r_vecs);
+ struct nfp_net_rx_ring *old = nn->rx_rings;
+ unsigned int r;
+
+ for (r = 0; r < nn->num_rx_rings; r++)
+ old[r].r_vec->rx_ring = &rings[r];
+
+ nn->rx_rings = rings;
+ return old;
}
-/**
- * nfp_net_alloc_rings() - Allocate resources for RX and TX rings
- * @nn: NFP Net device to reconfigure
- *
- * Return: 0 on success or negative errno on error.
- */
-static int nfp_net_alloc_rings(struct nfp_net *nn)
+static void
+nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
{
- struct nfp_net_r_vector *r_vec;
- struct msix_entry *entry;
- int err;
- int r;
+ unsigned int r;
+
+ if (!rings)
+ return;
for (r = 0; r < nn->num_r_vecs; r++) {
- r_vec = &nn->r_vecs[r];
- entry = &nn->irq_entries[r_vec->irq_idx];
-
- /* Setup NAPI */
- netif_napi_add(nn->netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
-
- snprintf(r_vec->name, sizeof(r_vec->name),
- "%s-rxtx-%d", nn->netdev->name, r);
- err = request_irq(entry->vector, r_vec->handler, 0,
- r_vec->name, r_vec);
- if (err) {
- nn_dbg(nn, "Error requesting IRQ %d\n", entry->vector);
- goto err_napi_del;
- }
+ nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+ nfp_net_rx_ring_free(&rings[r]);
+ }
- irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
+ kfree(rings);
+}
- nn_dbg(nn, "RV%02d: irq=%03d/%03d\n",
- r, entry->vector, entry->entry);
+static int
+nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ int idx)
+{
+ struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
+ int err;
- /* Allocate TX ring resources */
- err = nfp_net_tx_ring_alloc(r_vec->tx_ring);
- if (err)
- goto err_free_irq;
+ r_vec->tx_ring = &nn->tx_rings[idx];
+ nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
- /* Allocate RX ring resources */
- err = nfp_net_rx_ring_alloc(r_vec->rx_ring);
- if (err)
- goto err_free_tx;
+ r_vec->rx_ring = &nn->rx_rings[idx];
+ nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
+
+ snprintf(r_vec->name, sizeof(r_vec->name),
+ "%s-rxtx-%d", nn->netdev->name, idx);
+ err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
+ if (err) {
+ nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
+ return err;
}
+ disable_irq(entry->vector);
+
+ /* Setup NAPI */
+ netif_napi_add(nn->netdev, &r_vec->napi,
+ nfp_net_poll, NAPI_POLL_WEIGHT);
+
+ irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
+
+ nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
return 0;
+}
+
+static void
+nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
+{
+ struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
-err_free_tx:
- nfp_net_tx_ring_free(r_vec->tx_ring);
-err_free_irq:
irq_set_affinity_hint(entry->vector, NULL);
- free_irq(entry->vector, r_vec);
-err_napi_del:
netif_napi_del(&r_vec->napi);
- __nfp_net_free_rings(nn, r);
- return err;
+ free_irq(entry->vector, r_vec);
}
/**
@@ -1646,6 +1860,17 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
get_unaligned_be16(nn->netdev->dev_addr + 4) << 16);
}
+static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
+{
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
+
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
+}
+
/**
* nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
* @nn: NFP Net device to reconfigure
@@ -1653,6 +1878,7 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
u32 new_ctrl, update;
+ unsigned int r;
int err;
new_ctrl = nn->ctrl;
@@ -1669,79 +1895,40 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
- if (err) {
+ if (err)
nn_err(nn, "Could not disable device: %d\n", err);
- return;
+
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
+ nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
+ nfp_net_vec_clear_ring_data(nn, r);
}
nn->ctrl = new_ctrl;
}
-/**
- * nfp_net_start_vec() - Start ring vector
- * @nn: NFP Net device structure
- * @r_vec: Ring vector to be started
- */
-static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
+static void
+nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ unsigned int idx)
{
- unsigned int irq_vec;
- int err = 0;
-
- irq_vec = nn->irq_entries[r_vec->irq_idx].vector;
-
- disable_irq(irq_vec);
-
- err = nfp_net_rx_fill_freelist(r_vec->rx_ring);
- if (err) {
- nn_err(nn, "RV%02d: couldn't allocate enough buffers\n",
- r_vec->irq_idx);
- goto out;
- }
-
- napi_enable(&r_vec->napi);
-out:
- enable_irq(irq_vec);
+ /* Write the DMA address, size and MSI-X info to the device */
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
- return err;
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
}
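[Editor's note: the ilog2() calls above matter: the device registers hold the log2 of the descriptor count, so ring sizes must be powers of two - which is why the ethtool path later in this diff rounds requests up with roundup_pow_of_two(). A quick self-contained illustration of the encoding:]

	#include <stdio.h>

	/* Minimal ilog2 equivalent, for the example's sake. */
	static unsigned int ilog2_u32(unsigned int v)
	{
		unsigned int r = 0;

		while (v >>= 1)
			r++;
		return r;
	}

	int main(void)
	{
		printf("%u\n", ilog2_u32(512));  /* ring of 512  -> writes 9  */
		printf("%u\n", ilog2_u32(4096)); /* ring of 4096 -> writes 12 */
		return 0;
	}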
-static int nfp_net_netdev_open(struct net_device *netdev)
+static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
- int err, r;
- u32 update = 0;
- u32 new_ctrl;
-
- if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
- nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
- return -EBUSY;
- }
+ u32 new_ctrl, update = 0;
+ unsigned int r;
+ int err;
new_ctrl = nn->ctrl;
- /* Step 1: Allocate resources for rings and the like
- * - Request interrupts
- * - Allocate RX and TX ring resources
- * - Setup initial RSS table
- */
- err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
- nn->exn_name, sizeof(nn->exn_name),
- NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
- if (err)
- return err;
-
- err = nfp_net_alloc_rings(nn);
- if (err)
- goto err_free_exn;
-
- err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
- if (err)
- goto err_free_rings;
-
- err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
- if (err)
- goto err_free_rings;
-
if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
nfp_net_rss_write_key(nn);
nfp_net_rss_write_itbl(nn);
@@ -1756,22 +1943,18 @@ static int nfp_net_netdev_open(struct net_device *netdev)
update |= NFP_NET_CFG_UPDATE_IRQMOD;
}
- /* Step 2: Configure the NFP
- * - Enable rings from 0 to tx_rings/rx_rings - 1.
- * - Write MAC address (in case it changed)
- * - Set the MTU
- * - Set the Freelist buffer size
- * - Enable the FW
- */
+ for (r = 0; r < nn->num_r_vecs; r++)
+ nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
+
nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
- nfp_net_write_mac_addr(nn, netdev->dev_addr);
+ nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
- nn_writel(nn, NFP_NET_CFG_MTU, netdev->mtu);
+ nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
/* Enable device */
@@ -1784,69 +1967,213 @@ static int nfp_net_netdev_open(struct net_device *netdev)
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
- if (err)
- goto err_clear_config;
nn->ctrl = new_ctrl;
+ for (r = 0; r < nn->num_r_vecs; r++)
+ nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
+
/* Since reconfiguration requests while NFP is down are ignored we
* have to wipe the entire VXLAN configuration and reinitialize it.
*/
if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
- vxlan_get_rx_port(netdev);
+ vxlan_get_rx_port(nn->netdev);
}
- /* Step 3: Enable for kernel
- * - put some freelist descriptors on each RX ring
- * - enable NAPI on each ring
- * - enable all TX queues
- * - set link state
- */
+ return err;
+}
+
+/**
+ * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
+ * @nn: NFP Net device to reconfigure
+ */
+static int nfp_net_set_config_and_enable(struct nfp_net *nn)
+{
+ int err;
+
+ err = __nfp_net_set_config_and_enable(nn);
+ if (err)
+ nfp_net_clear_config_and_disable(nn);
+
+ return err;
+}
+
+/**
+ * nfp_net_open_stack() - Start the device from the stack's perspective
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_open_stack(struct nfp_net *nn)
+{
+ unsigned int r;
+
for (r = 0; r < nn->num_r_vecs; r++) {
- err = nfp_net_start_vec(nn, &nn->r_vecs[r]);
- if (err)
- goto err_disable_napi;
+ napi_enable(&nn->r_vecs[r].napi);
+ enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
}
- netif_tx_wake_all_queues(netdev);
+ netif_tx_wake_all_queues(nn->netdev);
+ enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
+ nfp_net_read_link_status(nn);
+}
+
+static int nfp_net_netdev_open(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err, r;
+
+ if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
+ nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
+ return -EBUSY;
+ }
+
+ /* Step 1: Allocate resources for rings and the like
+ * - Request interrupts
+ * - Allocate RX and TX ring resources
+ * - Setup initial RSS table
+ */
+ err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
+ nn->exn_name, sizeof(nn->exn_name),
+ NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
+ if (err)
+ return err;
err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
nn->lsc_name, sizeof(nn->lsc_name),
NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
if (err)
- goto err_stop_tx;
- nfp_net_read_link_status(nn);
+ goto err_free_exn;
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- return 0;
+ nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
+ GFP_KERNEL);
+ if (!nn->rx_rings)
+ goto err_free_lsc;
+ nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
+ GFP_KERNEL);
+ if (!nn->tx_rings)
+ goto err_free_rx_rings;
-err_stop_tx:
- netif_tx_disable(netdev);
- for (r = 0; r < nn->num_r_vecs; r++)
- nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
-err_disable_napi:
- while (r--) {
- napi_disable(&nn->r_vecs[r].napi);
- nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
+ if (err)
+ goto err_free_prev_vecs;
+
+ err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
+ if (err)
+ goto err_cleanup_vec_p;
+
+ err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
+ nn->fl_bufsz, nn->rxd_cnt);
+ if (err)
+ goto err_free_tx_ring_p;
+
+ err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
+ if (err)
+ goto err_flush_rx_ring_p;
}
-err_clear_config:
- nfp_net_clear_config_and_disable(nn);
+
+ err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
+ if (err)
+ goto err_free_rings;
+
+ err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
+ if (err)
+ goto err_free_rings;
+
+ /* Step 2: Configure the NFP
+ * - Enable rings from 0 to tx_rings/rx_rings - 1.
+ * - Write MAC address (in case it changed)
+ * - Set the MTU
+ * - Set the Freelist buffer size
+ * - Enable the FW
+ */
+ err = nfp_net_set_config_and_enable(nn);
+ if (err)
+ goto err_free_rings;
+
+ /* Step 3: Enable for kernel
+ * - put some freelist descriptors on each RX ring
+ * - enable NAPI on each ring
+ * - enable all TX queues
+ * - set link state
+ */
+ nfp_net_open_stack(nn);
+
+ return 0;
+
err_free_rings:
- nfp_net_free_rings(nn);
+ r = nn->num_r_vecs;
+err_free_prev_vecs:
+ while (r--) {
+ nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
+err_flush_rx_ring_p:
+ nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+err_free_tx_ring_p:
+ nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+err_cleanup_vec_p:
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ }
+ kfree(nn->tx_rings);
+err_free_rx_rings:
+ kfree(nn->rx_rings);
+err_free_lsc:
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
return err;
}
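[Editor's note: one subtlety in the error handling above is worth calling out: the unwind labels live inside the while (r--) loop, so a failure part-way through setting up vector r jumps into the middle of the loop body, releases only the stages that had already succeeded for that vector, and then falls through to unwinding every fully-constructed vector before it. A stripped-down, compilable sketch of the idiom; the stage/undo functions are hypothetical stubs.]

	static int stage_a(int i) { (void)i; return 0; }
	static int stage_b(int i) { (void)i; return 0; }
	static void undo_a(int i) { (void)i; }
	static void undo_b(int i) { (void)i; }

	static int setup_all(int n)
	{
		int err, r;

		for (r = 0; r < n; r++) {
			err = stage_a(r);
			if (err)
				goto err_unwind_prev;
			err = stage_b(r);
			if (err)
				goto err_undo_a; /* undo a for this r only */
		}
		return 0;

	err_unwind_prev:
		while (r--) {
			undo_b(r);
	err_undo_a:
			undo_a(r);
		}
		return err;
	}

	int main(void)
	{
		return setup_all(4);
	}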
/**
+ * nfp_net_close_stack() - Quiesce the stack (part of close)
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_stack(struct nfp_net *nn)
+{
+ unsigned int r;
+
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
+ netif_carrier_off(nn->netdev);
+ nn->link_up = false;
+
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+ napi_disable(&nn->r_vecs[r].napi);
+ }
+
+ netif_tx_disable(nn->netdev);
+}
+
+/**
+ * nfp_net_close_free_all() - Free all runtime resources
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_free_all(struct nfp_net *nn)
+{
+ unsigned int r;
+
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
+ nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+ nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ }
+
+ kfree(nn->rx_rings);
+ kfree(nn->tx_rings);
+
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+}
+
+/**
* nfp_net_netdev_close() - Called when the device is downed
* @netdev: netdev structure
*/
static int nfp_net_netdev_close(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- int r;
if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
@@ -1855,14 +2182,7 @@ static int nfp_net_netdev_close(struct net_device *netdev)
/* Step 1: Disable RX and TX rings from the Linux kernel perspective
*/
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
- netif_carrier_off(netdev);
- nn->link_up = false;
-
- for (r = 0; r < nn->num_r_vecs; r++)
- napi_disable(&nn->r_vecs[r].napi);
-
- netif_tx_disable(netdev);
+ nfp_net_close_stack(nn);
/* Step 2: Tell NFP
*/
@@ -1870,13 +2190,7 @@ static int nfp_net_netdev_close(struct net_device *netdev)
/* Step 3: Free resources
*/
- for (r = 0; r < nn->num_r_vecs; r++) {
- nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
- nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
- }
-
- nfp_net_free_rings(nn);
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+ nfp_net_close_free_all(nn);
nn_dbg(nn, "%s down", netdev->name);
return 0;
@@ -1902,37 +2216,139 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
return;
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
- if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN))
- return;
+ nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
nn->ctrl = new_ctrl;
}
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
+ unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
struct nfp_net *nn = netdev_priv(netdev);
- u32 tmp;
-
- nn_dbg(nn, "New MTU = %d\n", new_mtu);
+ struct nfp_net_rx_ring *tmp_rings;
+ int err;
if (new_mtu < 68 || new_mtu > nn->max_mtu) {
nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
return -EINVAL;
}
+ old_mtu = netdev->mtu;
+ old_fl_bufsz = nn->fl_bufsz;
+ new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
+
+ if (!netif_running(netdev)) {
+ netdev->mtu = new_mtu;
+ nn->fl_bufsz = new_fl_bufsz;
+ return 0;
+ }
+
+ /* Prepare new rings */
+ tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
+ nn->rxd_cnt);
+ if (!tmp_rings)
+ return -ENOMEM;
+
+ /* Stop device, swap in new rings, try to start the firmware */
+ nfp_net_close_stack(nn);
+ nfp_net_clear_config_and_disable(nn);
+
+ tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
netdev->mtu = new_mtu;
+ nn->fl_bufsz = new_fl_bufsz;
- /* Freelist buffer size rounded up to the nearest 1K */
- tmp = new_mtu + ETH_HLEN + VLAN_HLEN + NFP_NET_MAX_PREPEND;
- nn->fl_bufsz = roundup(tmp, 1024);
+ err = nfp_net_set_config_and_enable(nn);
+ if (err) {
+ const int err_new = err;
- /* restart if running */
- if (netif_running(netdev)) {
- nfp_net_netdev_close(netdev);
- nfp_net_netdev_open(netdev);
+ /* Try with old configuration and old rings */
+ tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
+ netdev->mtu = old_mtu;
+ nn->fl_bufsz = old_fl_bufsz;
+
+ err = __nfp_net_set_config_and_enable(nn);
+ if (err)
+ nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
+ err_new, err);
}
- return 0;
+ nfp_net_shadow_rx_rings_free(nn, tmp_rings);
+
+ nfp_net_open_stack(nn);
+
+ return err;
+}
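[Editor's note: for reference, the freelist buffer size computed above is simply the worst-case receive frame: the device prepend, an Ethernet header, two VLAN tags (the removed code counted only one and rounded up to 1 KiB), plus the MTU. With ETH_HLEN = 14 and VLAN_HLEN = 4 as in the kernel headers, and an illustrative prepend of 64 bytes standing in for NFP_NET_MAX_PREPEND (defined elsewhere in the driver):]

	#include <stdio.h>

	#define ETH_HLEN    14 /* as in <linux/if_ether.h> */
	#define VLAN_HLEN    4 /* as in <linux/if_vlan.h>  */
	#define MAX_PREPEND 64 /* illustrative stand-in for NFP_NET_MAX_PREPEND */

	int main(void)
	{
		unsigned int mtu = 1500;
		unsigned int fl_bufsz = MAX_PREPEND + ETH_HLEN +
					VLAN_HLEN * 2 + mtu;

		printf("fl_bufsz = %u\n", fl_bufsz); /* 1586 with these numbers */
		return 0;
	}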
+
+int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
+{
+ struct nfp_net_tx_ring *tx_rings = NULL;
+ struct nfp_net_rx_ring *rx_rings = NULL;
+ u32 old_rxd_cnt, old_txd_cnt;
+ int err;
+
+ if (!netif_running(nn->netdev)) {
+ nn->rxd_cnt = rxd_cnt;
+ nn->txd_cnt = txd_cnt;
+ return 0;
+ }
+
+ old_rxd_cnt = nn->rxd_cnt;
+ old_txd_cnt = nn->txd_cnt;
+
+ /* Prepare new rings */
+ if (nn->rxd_cnt != rxd_cnt) {
+ rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
+ rxd_cnt);
+ if (!rx_rings)
+ return -ENOMEM;
+ }
+ if (nn->txd_cnt != txd_cnt) {
+ tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
+ if (!tx_rings) {
+ nfp_net_shadow_rx_rings_free(nn, rx_rings);
+ return -ENOMEM;
+ }
+ }
+
+ /* Stop device, swap in new rings, try to start the firmware */
+ nfp_net_close_stack(nn);
+ nfp_net_clear_config_and_disable(nn);
+
+ if (rx_rings)
+ rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
+ if (tx_rings)
+ tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
+
+ nn->rxd_cnt = rxd_cnt;
+ nn->txd_cnt = txd_cnt;
+
+ err = nfp_net_set_config_and_enable(nn);
+ if (err) {
+ const int err_new = err;
+
+ /* Try with old configuration and old rings */
+ if (rx_rings)
+ rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
+ if (tx_rings)
+ tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
+
+ nn->rxd_cnt = old_rxd_cnt;
+ nn->txd_cnt = old_txd_cnt;
+
+ err = __nfp_net_set_config_and_enable(nn);
+ if (err)
+ nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
+ err_new, err);
+ }
+
+ nfp_net_shadow_rx_rings_free(nn, rx_rings);
+ nfp_net_shadow_tx_rings_free(nn, tx_rings);
+
+ nfp_net_open_stack(nn);
+
+ return err;
}
static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
@@ -2108,7 +2524,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
be16_to_cpu(nn->vxlan_ports[i]));
- nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
+ nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
}
/**
@@ -2254,6 +2670,9 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
+ setup_timer(&nn->reconfig_timer,
+ nfp_net_reconfig_timer, (unsigned long)nn);
+
return nn;
}
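[Editor's note: the setup_timer() hunk above pairs with the earlier switch from nfp_net_reconfig() to nfp_net_reconfig_post() in nfp_net_set_rx_mode() and nfp_net_set_vxlan_port(): those callers cannot sleep waiting for the firmware, so the update is posted and, presumably, the timer polls for completion. A rough sketch of such a posted-reconfig shape - an assumption about the mechanism, not the driver's actual implementation; all names below are invented.]

	/* Hypothetical posted-reconfig state. */
	struct posted_reconfig {
		unsigned int pending; /* update bits written, not yet acked */
	};

	static void reconfig_post(struct posted_reconfig *p, unsigned int update)
	{
		p->pending |= update;
		/* ...write 'update' to the device and arm a poll timer... */
	}

	static void reconfig_timer_fn(struct posted_reconfig *p)
	{
		/* ...if the device has acked, clear p->pending; otherwise
		 * re-arm the timer and check again later...
		 */
		(void)p;
	}

	int main(void)
	{
		struct posted_reconfig p = { 0 };

		reconfig_post(&p, 0x1);
		reconfig_timer_fn(&p);
		return 0;
	}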
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 8692003ae..ad6c4e31c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -81,14 +81,10 @@
/**
* @NFP_NET_TXR_MAX: Maximum number of TX rings
- * @NFP_NET_TXR_MASK: Mask for TX rings
* @NFP_NET_RXR_MAX: Maximum number of RX rings
- * @NFP_NET_RXR_MASK: Mask for RX rings
*/
#define NFP_NET_TXR_MAX 64
-#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1)
#define NFP_NET_RXR_MAX 64
-#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1)
/**
* Read/Write config words (0x0000 - 0x002c)
@@ -152,9 +148,9 @@
* @NFP_NET_CFG_VERSION: Firmware version number
* @NFP_NET_CFG_STS: Status
* @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings
- * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings
- * @NFP_NET_MAX_MTU: Maximum support MTU
+ * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * @NFP_NET_CFG_MAX_MTU: Maximum supported MTU
* @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
* @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
*
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 4c97c7131..f7c9a5bc4 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -40,8 +40,9 @@ static struct dentry *nfp_dir;
static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
{
- struct nfp_net_rx_ring *rx_ring = file->private;
int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt;
+ struct nfp_net_r_vector *r_vec = file->private;
+ struct nfp_net_rx_ring *rx_ring;
struct nfp_net_rx_desc *rxd;
struct sk_buff *skb;
struct nfp_net *nn;
@@ -49,9 +50,10 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
rtnl_lock();
- if (!rx_ring->r_vec || !rx_ring->r_vec->nfp_net)
+ if (!r_vec->nfp_net || !r_vec->rx_ring)
goto out;
- nn = rx_ring->r_vec->nfp_net;
+ nn = r_vec->nfp_net;
+ rx_ring = r_vec->rx_ring;
if (!netif_running(nn->netdev))
goto out;
@@ -115,7 +117,8 @@ static const struct file_operations nfp_rx_q_fops = {
static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
{
- struct nfp_net_tx_ring *tx_ring = file->private;
+ struct nfp_net_r_vector *r_vec = file->private;
+ struct nfp_net_tx_ring *tx_ring;
struct nfp_net_tx_desc *txd;
int d_rd_p, d_wr_p, txd_cnt;
struct sk_buff *skb;
@@ -124,9 +127,10 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
rtnl_lock();
- if (!tx_ring->r_vec || !tx_ring->r_vec->nfp_net)
+ if (!r_vec->nfp_net || !r_vec->tx_ring)
goto out;
- nn = tx_ring->r_vec->nfp_net;
+ nn = r_vec->nfp_net;
+ tx_ring = r_vec->tx_ring;
if (!netif_running(nn->netdev))
goto out;
@@ -183,7 +187,7 @@ static const struct file_operations nfp_tx_q_fops = {
void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
{
- static struct dentry *queues, *tx, *rx;
+ struct dentry *queues, *tx, *rx;
char int_name[16];
int i;
@@ -196,7 +200,7 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
/* Create queue debugging sub-tree */
queues = debugfs_create_dir("queue", nn->debugfs_dir);
- if (IS_ERR_OR_NULL(nn->debugfs_dir))
+ if (IS_ERR_OR_NULL(queues))
return;
rx = debugfs_create_dir("rx", queues);
@@ -207,13 +211,13 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
for (i = 0; i < nn->num_rx_rings; i++) {
sprintf(int_name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, rx,
- &nn->rx_rings[i], &nfp_rx_q_fops);
+ &nn->r_vecs[i], &nfp_rx_q_fops);
}
for (i = 0; i < nn->num_tx_rings; i++) {
sprintf(int_name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, tx,
- &nn->tx_rings[i], &nfp_tx_q_fops);
+ &nn->r_vecs[i], &nfp_tx_q_fops);
}
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 9a4084a68..ccfef1f17 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -153,37 +153,25 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
u32 rxd_cnt, txd_cnt;
- if (netif_running(netdev)) {
- /* Some NIC drivers allow reconfiguration on the fly,
- * some down the interface, change and then up it
- * again. For now we don't allow changes when the
- * device is up.
- */
- nn_warn(nn, "Can't change rings while device is up\n");
- return -EBUSY;
- }
-
/* We don't have separate queues/rings for small/large frames. */
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
/* Round up to supported values */
rxd_cnt = roundup_pow_of_two(ring->rx_pending);
- rxd_cnt = max_t(u32, rxd_cnt, NFP_NET_MIN_RX_DESCS);
- rxd_cnt = min_t(u32, rxd_cnt, NFP_NET_MAX_RX_DESCS);
-
txd_cnt = roundup_pow_of_two(ring->tx_pending);
- txd_cnt = max_t(u32, txd_cnt, NFP_NET_MIN_TX_DESCS);
- txd_cnt = min_t(u32, txd_cnt, NFP_NET_MAX_TX_DESCS);
- if (nn->rxd_cnt != rxd_cnt || nn->txd_cnt != txd_cnt)
- nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
- nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+ if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
+ txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
+ return -EINVAL;
- nn->rxd_cnt = rxd_cnt;
- nn->txd_cnt = txd_cnt;
+ if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
+ return 0;
- return 0;
+ nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
+ nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+
+ return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
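[Editor's note: two behavioral points in the rework above: the request is rounded up to a power of two (the device stores ring sizes as log2, as seen earlier), and out-of-range values now return -EINVAL instead of being silently clamped to the supported minimum/maximum. Illustratively, with made-up bounds of 256 and 32768:]

	#include <stdio.h>

	static unsigned int roundup_pow2(unsigned int v)
	{
		unsigned int r = 1;

		while (r < v)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		unsigned int min = 256, max = 32768; /* illustrative bounds */
		unsigned int req = 1000;
		unsigned int cnt = roundup_pow2(req); /* -> 1024 */

		if (cnt < min || cnt > max)
			printf("request %u rejected (-EINVAL)\n", req);
		else
			printf("request %u -> %u descriptors\n", req, cnt);
		return 0;
	}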
static void nfp_net_get_strings(struct net_device *netdev,
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 7096b11d1..0c49be93f 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -313,7 +313,8 @@ static int netx_eth_enable(struct net_device *ndev)
{
struct netx_eth_priv *priv = netdev_priv(ndev);
unsigned int mac4321, mac65;
- int running, i;
+ int running, i, ret;
+ bool inv_mac_addr = false;
ndev->netdev_ops = &netx_eth_netdev_ops;
ndev->watchdog_timeo = msecs_to_jiffies(5000);
@@ -358,15 +359,18 @@ static int netx_eth_enable(struct net_device *ndev)
xc_start(priv->xc);
if (!is_valid_ether_addr(ndev->dev_addr))
- printk("%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", ndev->name);
+ inv_mac_addr = true;
for (i=2; i<=18; i++)
pfifo_push(EMPTY_PTR_FIFO(priv->id),
FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
- return register_netdev(ndev);
+ ret = register_netdev(ndev);
+ if (inv_mac_addr)
+ printk("%s: Invalid ethernet MAC address. Please set using ip\n",
+ ndev->name);
+ return ret;
}
static int netx_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 52d9a94ae..87b7b8147 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -476,7 +476,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_init_desc(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
ether->cur_tx = 0x0;
ether->finish_tx = 0x0;
ether->cur_rx = 0x0;
@@ -490,7 +490,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_trigger_tx(dev);
w90p910_trigger_rx(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 2a55d6d53..8d710a3b4 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -481,7 +481,6 @@ struct pch_gbe_buffer {
/**
* struct pch_gbe_tx_ring - tx ring information
- * @tx_lock: spinlock structs
* @desc: pointer to the descriptor ring memory
* @dma: physical address of the descriptor ring
* @size: length of descriptor ring in bytes
@@ -491,7 +490,6 @@ struct pch_gbe_buffer {
* @buffer_info: array of buffer information structs
*/
struct pch_gbe_tx_ring {
- spinlock_t tx_lock;
struct pch_gbe_tx_desc *desc;
dma_addr_t dma;
unsigned int size;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3b98b263b..3cd87a41a 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1640,7 +1640,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
cleaned_count);
if (cleaned_count > 0) { /*skip this if nothing cleaned*/
/* Recover from running out of Tx resources in xmit_frame */
- spin_lock(&tx_ring->tx_lock);
+ netif_tx_lock(adapter->netdev);
if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
{
netif_wake_queue(adapter->netdev);
@@ -1652,7 +1652,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
tx_ring->next_to_clean);
- spin_unlock(&tx_ring->tx_lock);
+ netif_tx_unlock(adapter->netdev);
}
return cleaned;
}
@@ -1805,7 +1805,6 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- spin_lock_init(&tx_ring->tx_lock);
for (desNo = 0; desNo < tx_ring->count; desNo++) {
tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
@@ -2135,15 +2134,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
- unsigned long flags;
- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
- }
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
netdev_dbg(netdev,
"Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
tx_ring->next_to_use, tx_ring->next_to_clean);
@@ -2152,7 +2145,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* CRC,ITAG no support */
pch_gbe_tx_queue(adapter, tx_ring, skb);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 13d88a602..91be2f02e 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1144,7 +1144,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
/* Restart the chip's Tx/Rx processes. */
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index fa2db41e0..fb1d1031b 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -714,7 +714,7 @@ static void yellowfin_tx_timeout(struct net_device *dev)
if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
netif_wake_queue (dev); /* Typical path */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
}
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index ddcfcab03..680d8c736 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -98,9 +98,40 @@ config QED
---help---
This enables the support for ...
+config QED_SRIOV
+ bool "QLogic QED 25/40/100Gb SR-IOV support"
+ depends on QED && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support for QED devices.
+ This allows for virtual function acceleration in virtualized
+ environments.
+
config QEDE
tristate "QLogic QED 25/40/100Gb Ethernet NIC"
depends on QED
---help---
This enables the support for ...
+
+config QEDE_VXLAN
+ bool "Virtual eXtensible Local Area Network support"
+ default n
+ depends on QEDE && VXLAN && !(QEDE=y && VXLAN=m)
+ ---help---
+ This enables hardware offload support for VXLAN protocol over
+ qede module. Say Y here if you want to enable hardware offload
+ support for Virtual eXtensible Local Area Network (VXLAN)
+ in the driver.
+
+config QEDE_GENEVE
+ bool "Generic Network Virtualization Encapsulation (GENEVE) support"
+ depends on QEDE && GENEVE && !(QEDE=y && GENEVE=m)
+ ---help---
+ This allows one to create GENEVE virtual interfaces that provide
+ Layer 2 Networks over Layer 3 Networks. GENEVE is often used
+ to tunnel virtual network infrastructure in virtualized environments.
+ Say Y here if you want to enable hardware offload support for
+ Generic Network Virtualization Encapsulation (GENEVE) in the driver.
+
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 84bc885d7..1b8136496 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2286,7 +2286,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
goto request_reset;
}
}
- adapter->netdev->trans_start = jiffies;
+ netif_trans_update(adapter->netdev);
rtnl_unlock();
return;
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 5c2fd5723..d1f157e43 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
- qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
+ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
+ qed_selftest.o qed_dcbx.o
+qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fcb8e9ba5..1042f2af8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -26,12 +26,14 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.0.0"
+#define DRV_MODULE_VERSION "8.7.1.20"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
#define VER_SIZE 16
+#define QED_WFQ_UNIT 100
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -74,6 +76,51 @@ struct qed_rt_data {
bool *b_valid;
};
+enum qed_tunn_mode {
+ QED_MODE_L2GENEVE_TUNN,
+ QED_MODE_IPGENEVE_TUNN,
+ QED_MODE_L2GRE_TUNN,
+ QED_MODE_IPGRE_TUNN,
+ QED_MODE_VXLAN_TUNN,
+};
+
+enum qed_tunn_clss {
+ QED_TUNN_CLSS_MAC_VLAN,
+ QED_TUNN_CLSS_MAC_VNI,
+ QED_TUNN_CLSS_INNER_MAC_VLAN,
+ QED_TUNN_CLSS_INNER_MAC_VNI,
+ MAX_QED_TUNN_CLSS,
+};
+
+struct qed_tunn_start_params {
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
+struct qed_tunn_update_params {
+ unsigned long tunn_mode_update_mask;
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
/* The PCI personality is not quite synonymous to protocol ID:
* 1. All personalities need CORE connections
* 2. The Ethernet personality may support also the RoCE protocol
@@ -105,6 +152,7 @@ enum QED_RESOURCES {
enum QED_FEATURE {
QED_PF_L2_QUE,
+ QED_VF,
QED_MAX_FEATURES,
};
@@ -192,6 +240,12 @@ struct qed_dmae_info {
struct dmae_cmd *p_dmae_cmd;
};
+struct qed_wfq_data {
+ /* when feature is configured for at least 1 vport */
+ u32 min_speed;
+ bool configured;
+};
+
struct qed_qm_info {
struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
@@ -212,6 +266,7 @@ struct qed_qm_info {
bool vport_wfq_en;
u8 pf_wfq;
u32 pf_rl;
+ struct qed_wfq_data *wfq_data;
};
struct storm_stats {
@@ -256,6 +311,8 @@ struct qed_hwfn {
bool first_on_engine;
bool hw_init_done;
+ u8 num_funcs_on_engine;
+
/* BAR access */
void __iomem *regview;
void __iomem *doorbells;
@@ -306,8 +363,12 @@ struct qed_hwfn {
/* True if the driver requests for the link */
bool b_drv_link_init;
+ struct qed_vf_iov *vf_iov_info;
+ struct qed_pf_iov *pf_iov_info;
struct qed_mcp_info *mcp_info;
+ struct qed_dcbx_info *p_dcbx_info;
+
struct qed_hw_cid_data *p_tx_cids;
struct qed_hw_cid_data *p_rx_cids;
@@ -322,6 +383,12 @@ struct qed_hwfn {
struct qed_simd_fp_handler simd_proto_handler[64];
+#ifdef CONFIG_QED_SRIOV
+ struct workqueue_struct *iov_wq;
+ struct delayed_work iov_task;
+ unsigned long iov_task_flags;
+#endif
+
struct z_stream_s *stream;
};
@@ -430,6 +497,13 @@ struct qed_dev {
u8 num_hwfns;
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+ /* SRIOV */
+ struct qed_hw_sriov_info *p_iov_info;
+#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
+
+ unsigned long tunn_mode;
+
+ bool b_is_vf;
u32 drv_type;
struct qed_eth_stats *reset_stats;
@@ -459,6 +533,8 @@ struct qed_dev {
const struct firmware *firmware;
};
+#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
+#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
@@ -480,6 +556,10 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
#define PURE_LB_TC 8
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
/* Other Linux specific common definitions */
@@ -507,6 +587,4 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
-#define QED_ETH_INTERFACE_VERSION 300
-
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index fc767c07a..ac284c58d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -24,11 +24,13 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
@@ -63,10 +65,12 @@ union conn_context {
struct qed_conn_type_cfg {
u32 cid_count;
u32 cid_start;
+ u32 cids_per_vf;
};
/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
enum ilt_clients {
@@ -97,6 +101,10 @@ struct qed_ilt_client_cfg {
/* ILT client blocks for PF */
struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
};
/* Per Path -
@@ -123,6 +131,11 @@ struct qed_cxt_mngr {
/* computed ILT structure */
struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
@@ -131,37 +144,60 @@ struct qed_cxt_mngr {
u32 pf_start_line;
};
-static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
-{
- u32 type, pf_cids = 0;
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct qed_cdu_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
- for (type = 0; type < MAX_CONN_TYPES; type++)
- pf_cids += p_mngr->conn_cfg[type].cid_count;
+static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_cdu_iids *iids)
+{
+ u32 type;
- return pf_cids;
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
}
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
struct qed_qm_iids *iids)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- int type;
+ u32 vf_cids = 0, type;
- for (type = 0; type < MAX_CONN_TYPES; type++)
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
iids->cids += p_mngr->conn_cfg[type].cid_count;
+ vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
- DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+ iids->vf_cids += vf_cids * p_mngr->vf_count;
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "iids: CIDS %08x vf_cids %08x\n",
+ iids->cids, iids->vf_cids);
}
/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type,
- u32 cid_count)
+ u32 cid_count, u32 vf_cid_cnt)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+ p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
+}
+
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *vf_cid)
+{
+ if (vf_cid)
+ *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+ return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
@@ -210,10 +246,12 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
- u32 curr_line, total, pf_cids;
+ struct qed_cdu_iids cdu_iids;
struct qed_qm_iids qm_iids;
+ u32 curr_line, total, i;
memset(&qm_iids, 0, sizeof(qm_iids));
+ memset(&cdu_iids, 0, sizeof(cdu_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
@@ -224,14 +262,16 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* CDUC */
p_cli = &p_mngr->clients[ILT_CLI_CDUC];
curr_line = p_mngr->pf_start_line;
+
+ /* CDUC PF */
p_cli->pf_total_lines = 0;
/* get the counters for the CDUC and QM clients */
- pf_cids = qed_cxt_cdu_iids(p_mngr);
+ qed_cxt_cdu_iids(p_mngr, &cdu_iids);
p_blk = &p_cli->pf_blks[CDUC_BLK];
- total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+ total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total, CONN_CXT_SIZE(p_hwfn));
@@ -239,17 +279,36 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ /* CDUC VF */
+ p_blk = &p_cli->vf_blks[CDUC_BLK];
+ total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUC);
+
/* QM */
p_cli = &p_mngr->clients[ILT_CLI_QM];
p_blk = &p_cli->pf_blks[0];
qed_cxt_qm_iids(p_hwfn, &qm_iids);
- total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
- p_hwfn->qm_info.num_pqs, 0);
-
- DP_VERBOSE(p_hwfn, QED_MSG_ILT,
- "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
- qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+ total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ qm_iids.vf_cids, 0,
+ p_hwfn->qm_info.num_pqs,
+ p_hwfn->qm_info.num_vf_pqs);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_ILT,
+ "QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids,
+ qm_iids.vf_cids,
+ p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
qed_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total * 0x1000,
@@ -358,7 +417,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *clients = p_mngr->clients;
struct qed_ilt_cli_blk *p_blk;
- u32 size, i, j;
+ u32 size, i, j, k;
int rc;
size = qed_cxt_ilt_shadow_size(clients);
@@ -383,6 +442,16 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
if (rc != 0)
goto ilt_shadow_fail;
}
+ for (k = 0; k < p_mngr->vf_count; k++) {
+ for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+ u32 lines = clients[i].vf_total_lines * k;
+
+ p_blk = &clients[i].vf_blks[j];
+ rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
+ if (rc != 0)
+ goto ilt_shadow_fail;
+ }
+ }
}
return 0;
@@ -467,6 +536,9 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+ if (p_hwfn->cdev->p_iov_info)
+ p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+
/* Set the cxt manager pointer prior to further allocations */
p_hwfn->p_cxt_mngr = p_mngr;
@@ -579,8 +651,10 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.is_first_pf = p_hwfn->first_on_engine;
params.num_pf_cids = iids.cids;
+ params.num_vf_cids = iids.vf_cids;
params.start_pq = qm_info->start_pq;
- params.num_pf_pqs = qm_info->num_pqs;
+ params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+ params.num_vf_pqs = qm_info->num_vf_pqs;
params.start_vport = qm_info->start_vport;
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
@@ -610,26 +684,55 @@ static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 dq_pf_max_cid = 0;
+ u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
- /* 5 - PF */
+ dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
+
+ /* Connection types 6 & 7 are not in use, yet they must be configured
+ * as the highest possible connection. Not configuring them means the
+	 * defaults will be used, and with a large number of cids a bug
+	 * may occur if the defaults are smaller than dq_pf_max_cid /
+	 * dq_vf_max_cid.
+ */
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}
static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
@@ -653,6 +756,38 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
}
}
+static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ u32 blk_factor;
+
+	/* For simplicity we set the 'block' to be an ILT page */
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_BASE_RT_OFFSET,
+ p_iov->first_vf_in_pf);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+ p_iov->first_vf_in_pf + p_iov->total_vfs);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+}
+
/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
@@ -662,6 +797,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn);
+ qed_ilt_vf_bounds_init(p_hwfn);
p_mngr = p_hwfn->p_cxt_mngr;
p_shdw = p_mngr->ilt_shadow;
@@ -839,10 +975,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
- qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
- p_params->num_cons);
+ p_params->num_cons, 1);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c8e1f5e5c..234c0fa8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -51,6 +51,9 @@ enum qed_cxt_elem_type {
QED_ELEM_TASK
};
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid);
+
/**
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init
*
@@ -128,6 +131,16 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
/**
+ * @brief Reconfigures QM pf on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
* @brief qed_cxt_release - Release a cid
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
new file mode 100644
index 000000000..21ec1c2df
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -0,0 +1,563 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+
+#define QED_DCBX_MAX_MIB_READ_TRY (100)
+#define QED_ETH_TYPE_DEFAULT (0)
+#define QED_ETH_TYPE_ROCE (0x8915)
+#define QED_UDP_PORT_TYPE_ROCE_V2 (0x12B7)
+#define QED_ETH_TYPE_FCOE (0x8906)
+#define QED_TCP_PORT_ISCSI (0xCBC)
+
+#define QED_DCBX_INVALID_PRIORITY 0xFF
+
+/* Get the Traffic Class from the priority-to-TC table; each 4-bit field
+ * holds the traffic class corresponding to one priority.
+ */
+#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \
+ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
+
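[Editor's note: a worked example of QED_DCBX_PRIO2TC() above: priority 0 maps to the most significant nibble of the table and priority 7 to the least. With an illustrative prio_tc_tbl of 0x76543210 the macro yields TC 7 for priority 0 down to TC 0 for priority 7:]

	#include <stdio.h>

	#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \
		((unsigned int)((prio_tc_tbl) >> ((7 - (prio)) * 4)) & 0x7)

	int main(void)
	{
		unsigned int tbl = 0x76543210; /* illustrative table */
		unsigned int prio;

		for (prio = 0; prio < 8; prio++)
			printf("prio %u -> tc %u\n",
			       prio, QED_DCBX_PRIO2TC(tbl, prio));
		return 0;
	}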
+static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
+ {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+};
+
+static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+ return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE);
+}
+
+static bool qed_dcbx_app_port(u32 app_info_bitmap)
+{
+ return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT);
+}
+
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+ proto_id == QED_ETH_TYPE_DEFAULT);
+}
+
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_port(app_info_bitmap) &&
+ proto_id == QED_TCP_PORT_ISCSI);
+}
+
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+ proto_id == QED_ETH_TYPE_FCOE);
+}
+
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
+ proto_id == QED_ETH_TYPE_ROCE);
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id)
+{
+ return !!(qed_dcbx_app_port(app_info_bitmap) &&
+ proto_id == QED_UDP_PORT_TYPE_ROCE_V2);
+}
+
+static void
+qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
+{
+ enum dcbx_protocol_type id;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n",
+ p_data->dcbx_enabled);
+
+ for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+ id = qed_dcbx_app_update[i].id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
+ qed_dcbx_app_update[i].name, p_data->arr[id].update,
+ p_data->arr[id].enable, p_data->arr[id].priority,
+ p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+ }
+}
+
+static void
+qed_dcbx_set_params(struct qed_dcbx_results *p_data,
+ struct qed_hw_info *p_info,
+ bool enable,
+ bool update,
+ u8 prio,
+ u8 tc,
+ enum dcbx_protocol_type type,
+ enum qed_pci_personality personality)
+{
+ /* PF update ramrod data */
+ p_data->arr[type].update = update;
+ p_data->arr[type].enable = enable;
+ p_data->arr[type].priority = prio;
+ p_data->arr[type].tc = tc;
+
+ /* QM reconf data */
+ if (p_info->personality == personality) {
+ if (personality == QED_PCI_ETH)
+ p_info->non_offload_tc = tc;
+ else
+ p_info->offload_tc = tc;
+ }
+}
+
+/* Update app protocol data and hw_info fields with the TLV info */
+static void
+qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
+ struct qed_hwfn *p_hwfn,
+ bool enable,
+ bool update,
+ u8 prio, u8 tc, enum dcbx_protocol_type type)
+{
+ struct qed_hw_info *p_info = &p_hwfn->hw_info;
+ enum qed_pci_personality personality;
+ enum dcbx_protocol_type id;
+ char *name;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+ id = qed_dcbx_app_update[i].id;
+
+ if (type != id)
+ continue;
+
+ personality = qed_dcbx_app_update[i].personality;
+ name = qed_dcbx_app_update[i].name;
+
+ qed_dcbx_set_params(p_data, p_info, enable, update,
+ prio, tc, type, personality);
+ }
+}
+
+static bool
+qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
+ u32 app_prio_bitmap,
+ u16 id, enum dcbx_protocol_type *type)
+{
+ if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_FCOE;
+ } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ROCE;
+ } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ISCSI;
+ } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ETH;
+ } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) {
+ *type = DCBX_PROTOCOL_ROCE_V2;
+ } else {
+ *type = DCBX_MAX_PROTOCOL_TYPE;
+ DP_ERR(p_hwfn,
+ "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
+ id, app_prio_bitmap);
+ return false;
+ }
+
+ return true;
+}
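
The five TLV helpers above amount to a lookup on (selection field, protocol id) pairs built from the constants defined at the top of this file. A standalone sketch of the same match order; the identifiers here are illustrative, not the driver's:

#include <stdio.h>
#include <stdint.h>

enum sf { SF_ETHTYPE, SF_PORT };

/* Same precedence as qed_dcbx_get_app_protocol_type(); the numeric
 * constants are the ethertype/port defines from the top of the file.
 */
static const char *classify(enum sf sf, uint16_t id)
{
        if (sf == SF_ETHTYPE && id == 0x8906)
                return "FCOE";
        if (sf == SF_ETHTYPE && id == 0x8915)
                return "ROCE";
        if (sf == SF_PORT && id == 0xCBC)
                return "ISCSI";
        if (sf == SF_ETHTYPE && id == 0x0)
                return "ETH (default)";
        if (sf == SF_PORT && id == 0x12B7)
                return "ROCE_V2";
        return "no action";
}

int main(void)
{
        printf("%s\n", classify(SF_ETHTYPE, 0x8906));   /* FCOE */
        printf("%s\n", classify(SF_PORT, 0x12B7));      /* ROCE_V2 */
        printf("%s\n", classify(SF_PORT, 0x1234));      /* no action */
        return 0;
}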
+
+/* Parse app TLVs to update TC information in the hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static int
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_results *p_data,
+ struct dcbx_app_priority_entry *p_tbl,
+ u32 pri_tc_tbl, int count, bool dcbx_enabled)
+{
+ u8 tc, priority_map;
+ enum dcbx_protocol_type type;
+ u16 protocol_id;
+ int priority;
+ bool enable;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+
+ /* Parse APP TLV */
+ for (i = 0; i < count; i++) {
+ protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PRI_MAP);
+ priority = ffs(priority_map) - 1;
+ if (priority < 0) {
+ DP_ERR(p_hwfn, "Invalid priority\n");
+ return -EINVAL;
+ }
+
+ tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
+ if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ protocol_id, &type)) {
+ /* ETH always has the enable bit reset, as it gets
+ * vlan information per packet. For other protocols,
+ * it should be set according to the dcbx_enabled
+ * indication, but we only got here if there was an
+ * app TLV for the protocol, so DCBX must be enabled.
+ */
+ enable = !(type == DCBX_PROTOCOL_ETH);
+
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ priority, tc, type);
+ }
+ }
+
+ /* If the RoCE-v2 TLV is not detected, the driver needs to use the
+ * RoCE app data for RoCE-v2, not the default app data.
+ */
+ if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+ p_data->arr[DCBX_PROTOCOL_ROCE].update) {
+ tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority;
+ qed_dcbx_update_app_info(p_data, p_hwfn, true, true,
+ priority, tc, DCBX_PROTOCOL_ROCE_V2);
+ }
+
+ /* Update ramrod protocol data and hw_info fields
+ * with default info when the corresponding APP TLVs are not detected.
+ * The enable field has different logic for ethernet, as only for
+ * ethernet should DCB be disabled by default, since the information
+ * arrives from the OS (unless an explicit app TLV was present).
+ */
+ tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+ for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+ if (p_data->arr[type].update)
+ continue;
+
+ enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled;
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ priority, tc, type);
+ }
+
+ return 0;
+}
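
The RoCE-v2 fallback above boils down to copying the negotiated RoCE app data whenever no explicit RoCE-v2 TLV arrived; a tiny sketch with made-up values:

#include <stdio.h>

struct app_data {
        int update, prio, tc;
};

int main(void)
{
        /* hypothetical results: RoCE negotiated, no RoCE-v2 TLV seen */
        struct app_data roce = { 1, 4, 2 }, roce_v2 = { 0, 0, 0 };

        if (!roce_v2.update && roce.update)
                roce_v2 = roce; /* inherit the RoCE app data */

        printf("roce_v2: update %d prio %d tc %d\n",
               roce_v2.update, roce_v2.prio, roce_v2.tc);
        return 0;
}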
+
+/* Parse app TLVs to update TC information in the hw_info structure for
+ * reconfiguring QM. Get protocol specific data for PF update ramrod command.
+ */
+static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+{
+ struct dcbx_app_priority_feature *p_app;
+ struct dcbx_app_priority_entry *p_tbl;
+ struct qed_dcbx_results data = { 0 };
+ struct dcbx_ets_feature *p_ets;
+ struct qed_hw_info *p_info;
+ u32 pri_tc_tbl, flags;
+ bool dcbx_enabled;
+ int num_entries;
+ int rc = 0;
+
+ /* If the DCBX version is non-zero, then negotiation was
+ * successfully performed.
+ */
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+ dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+
+ p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+ p_tbl = p_app->app_pri_tbl;
+
+ p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+ pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+ p_info = &p_hwfn->hw_info;
+ num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+ rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ num_entries, dcbx_enabled);
+ if (rc)
+ return rc;
+
+ p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ data.pf_id = p_hwfn->rel_pf_id;
+ data.dcbx_enabled = dcbx_enabled;
+
+ qed_dcbx_dp_protocol(p_hwfn, &data);
+
+ memcpy(&p_hwfn->p_dcbx_info->results, &data,
+ sizeof(struct qed_dcbx_results));
+
+ return 0;
+}
+
+static int
+qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_mib_meta_data *p_data,
+ enum qed_mib_read_type type)
+{
+ u32 prefix_seq_num, suffix_seq_num;
+ int read_count = 0;
+ int rc = 0;
+
+ /* The data is considered to be valid only if both sequence numbers are
+ * the same.
+ */
+ do {
+ if (type == QED_DCBX_REMOTE_LLDP_MIB) {
+ qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+ } else {
+ qed_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->mib->prefix_seq_num;
+ suffix_seq_num = p_data->mib->suffix_seq_num;
+ }
+ read_count++;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DCB,
+ "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (read_count < QED_DCBX_MAX_MIB_READ_TRY));
+
+ if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) {
+ DP_ERR(p_hwfn,
+ "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ rc = -EIO;
+ }
+
+ return rc;
+}
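
The prefix/suffix sequence numbers give a seqlock-style consistency check: the writer bumps both around an update, so a torn read shows a mismatch and is retried. A userspace sketch of the same loop, with illustrative types and names:

#include <stdio.h>

#define MAX_READ_TRY 100

struct mib {
        unsigned int prefix_seq_num;
        int payload;
        unsigned int suffix_seq_num;
};

static int read_consistent(const volatile struct mib *shared, struct mib *out)
{
        int tries = 0;

        do {    /* retry until both sequence numbers agree */
                out->prefix_seq_num = shared->prefix_seq_num;
                out->payload = shared->payload;
                out->suffix_seq_num = shared->suffix_seq_num;
                tries++;
        } while (out->prefix_seq_num != out->suffix_seq_num &&
                 tries < MAX_READ_TRY);

        return out->prefix_seq_num == out->suffix_seq_num ? 0 : -1;
}

int main(void)
{
        struct mib shared = { 3, 42, 3 }, snap;

        if (!read_consistent(&shared, &snap))
                printf("payload %d at seq %u\n", snap.payload,
                       snap.prefix_seq_num);
        return 0;
}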
+
+static int
+qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_config_params);
+ data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+ data.size = sizeof(struct lldp_config_params_s);
+ qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_status_params);
+ data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+ data.size = sizeof(struct lldp_status_params_s);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, operational_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->operational;
+ data.size = sizeof(struct dcbx_mib);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, remote_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->remote;
+ data.size = sizeof(struct dcbx_mib);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size);
+
+ return rc;
+}
+
+static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ int rc = -EINVAL;
+
+ switch (type) {
+ case QED_DCBX_OPERATIONAL_MIB:
+ rc = qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_REMOTE_MIB:
+ rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_LOCAL_MIB:
+ rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt);
+ break;
+ case QED_DCBX_REMOTE_LLDP_MIB:
+ rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_LOCAL_LLDP_MIB:
+ rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ }
+
+ return rc;
+}
+
+/* Read updated MIB.
+ * Reconfigure QM and invoke the PF update ramrod command if an
+ * operational MIB change is detected.
+ */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ int rc = 0;
+
+ rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ return rc;
+
+ if (type == QED_DCBX_OPERATIONAL_MIB) {
+ rc = qed_dcbx_process_mib_info(p_hwfn);
+ if (!rc) {
+ /* reconfigure tcs of QM queues according
+ * to negotiation results
+ */
+ qed_qm_reconf(p_hwfn, p_ptt);
+
+ /* update storm FW with negotiation results */
+ qed_sp_pf_update(p_hwfn);
+ }
+ }
+
+ return rc;
+}
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc = 0;
+
+ p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
+ if (!p_hwfn->p_dcbx_info) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate 'struct qed_dcbx_info'\n");
+ rc = -ENOMEM;
+ }
+
+ return rc;
+}
+
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_info *p_dcbx_info)
+{
+ kfree(p_hwfn->p_dcbx_info);
+}
+
+static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+ struct qed_dcbx_results *p_src,
+ enum dcbx_protocol_type type)
+{
+ p_data->dcb_enable_flag = p_src->arr[type].enable;
+ p_data->dcb_priority = p_src->arr[type].priority;
+ p_data->dcb_tc = p_src->arr[type].tc;
+}
+
+/* Set pf update ramrod command params */
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest)
+{
+ struct protocol_dcb_data *p_dcb_data;
+ bool update_flag = false;
+
+ p_dest->pf_id = p_src->pf_id;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
+ p_dest->update_fcoe_dcb_data_flag = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
+ p_dest->update_roce_dcb_data_flag = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
+ p_dest->update_roce_dcb_data_flag = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
+ p_dest->update_iscsi_dcb_data_flag = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
+ p_dest->update_eth_dcb_data_flag = update_flag;
+
+ p_dcb_data = &p_dest->fcoe_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
+ p_dcb_data = &p_dest->roce_dcb_data;
+
+ if (p_src->arr[DCBX_PROTOCOL_ROCE].update)
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+ DCBX_PROTOCOL_ROCE);
+ if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update)
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+ DCBX_PROTOCOL_ROCE_V2);
+
+ p_dcb_data = &p_dest->iscsi_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
+ p_dcb_data = &p_dest->eth_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
new file mode 100644
index 000000000..e7f834dbd
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -0,0 +1,80 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DCBX_H
+#define _QED_DCBX_H
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
+
+enum qed_mib_read_type {
+ QED_DCBX_OPERATIONAL_MIB,
+ QED_DCBX_REMOTE_MIB,
+ QED_DCBX_LOCAL_MIB,
+ QED_DCBX_REMOTE_LLDP_MIB,
+ QED_DCBX_LOCAL_LLDP_MIB
+};
+
+struct qed_dcbx_app_data {
+ bool enable; /* DCB enabled */
+ bool update; /* Update indication */
+ u8 priority; /* Priority */
+ u8 tc; /* Traffic Class */
+};
+
+struct qed_dcbx_results {
+ bool dcbx_enabled;
+ u8 pf_id;
+ struct qed_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct qed_dcbx_app_metadata {
+ enum dcbx_protocol_type id;
+ char *name;
+ enum qed_pci_personality personality;
+};
+
+#define QED_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+struct qed_dcbx_info {
+ struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_results results;
+ struct dcbx_mib operational;
+ struct dcbx_mib remote;
+ u8 dcbx_cap;
+};
+
+struct qed_dcbx_mib_meta_data {
+ struct lldp_config_params_s *lldp_local;
+ struct lldp_status_params_s *lldp_remote;
+ struct dcbx_local_params *local_admin;
+ struct dcbx_mib *mib;
+ size_t size;
+ u32 addr;
+};
+
+/* QED local interface routines */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *,
+ struct qed_ptt *, enum qed_mib_read_type);
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *);
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index b7d100f6b..2d89e8c16 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -22,6 +22,7 @@
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
@@ -30,6 +31,11 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static spinlock_t qm_lock;
+static bool qm_lock_init = false;
/* API common to all protocols */
enum BAR_ID {
@@ -40,10 +46,14 @@ enum BAR_ID {
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
enum BAR_ID bar_id)
{
- u32 bar_reg = (bar_id == BAR_ID_0 ?
- PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
- u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 1 << 17;
+ val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@@ -105,12 +115,17 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
qm_info->qm_vport_params = NULL;
kfree(qm_info->qm_port_params);
qm_info->qm_port_params = NULL;
+ kfree(qm_info->wfq_data);
+ qm_info->wfq_data = NULL;
}
void qed_resc_free(struct qed_dev *cdev)
{
int i;
+ if (IS_VF(cdev))
+ return;
+
kfree(cdev->fw_data);
cdev->fw_data = NULL;
@@ -134,20 +149,29 @@ void qed_resc_free(struct qed_dev *cdev)
qed_eq_free(p_hwfn, p_hwfn->p_eq);
qed_consq_free(p_hwfn, p_hwfn->p_consq);
qed_int_free(p_hwfn);
+ qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
+ qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
}
}
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
+ u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
- u8 num_vports, i, vport_id, num_ports;
u16 num_pqs, multi_cos_tcs = 1;
-
+ u8 pf_wfq = qm_info->pf_wfq;
+ u32 pf_rl = qm_info->pf_rl;
+ u16 num_vfs = 0;
+
+#ifdef CONFIG_QED_SRIOV
+ if (p_hwfn->cdev->p_iov_info)
+ num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+#endif
memset(qm_info, 0, sizeof(*qm_info));
- num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+ num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
/* Sanity checking that setup requires legal number of resources */
@@ -160,40 +184,70 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
 /* PQs will be arranged as follows: first per-TC PQs, then the pure-LB queue.
*/
- qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
- num_pqs, GFP_KERNEL);
+ qm_info->qm_pq_params = kcalloc(num_pqs,
+ sizeof(struct init_qm_pq_params),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->qm_pq_params)
goto alloc_err;
- qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
- num_vports, GFP_KERNEL);
+ qm_info->qm_vport_params = kcalloc(num_vports,
+ sizeof(struct init_qm_vport_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_vport_params)
goto alloc_err;
- qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- MAX_NUM_PORTS, GFP_KERNEL);
+ qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+ sizeof(struct init_qm_port_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_port_params)
goto alloc_err;
+ qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
/* First init per-TC PQs */
for (i = 0; i < multi_cos_tcs; i++) {
- struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
-
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.non_offload_tc;
- params->wrr_group = 1;
+ struct init_qm_pq_params *params =
+ &qm_info->qm_pq_params[curr_queue++];
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH) {
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.non_offload_tc;
+ params->wrr_group = 1;
+ } else {
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.offload_tc;
+ params->wrr_group = 1;
+ }
}
/* Then init pure-LB PQ */
- qm_info->pure_lb_pq = i;
- qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
- qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[i].wrr_group = 1;
- i++;
+ qm_info->pure_lb_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id =
+ (u8) RESC_START(p_hwfn, QED_VPORT);
+ qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
qm_info->offload_pq = 0;
+ /* Then init per-VF PQs */
+ vf_offset = curr_queue;
+ for (i = 0; i < num_vfs; i++) {
+ /* First vport is used by the PF */
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.non_offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ qm_info->vf_queues_offset = vf_offset;
qm_info->num_pqs = num_pqs;
qm_info->num_vports = num_vports;
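
With the per-VF queues added, the PQ array laid out by this function is: the per-TC PF queues first, then the pure-LB queue, then one queue per VF. A standalone sketch of that layout with hypothetical counts:

#include <stdio.h>

int main(void)
{
        int multi_cos_tcs = 1, num_vfs = 4, q = 0, i;

        for (i = 0; i < multi_cos_tcs; i++)
                printf("pq %d: PF per-TC queue\n", q++);
        printf("pq %d: pure-LB queue\n", q++);
        for (i = 0; i < num_vfs; i++)   /* first vport is the PF's */
                printf("pq %d: VF %d (vport = pf_vport + %d)\n",
                       q++, i, i + 1);
        return 0;
}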
@@ -211,29 +265,91 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
- qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+ qm_info->num_vf_pqs = num_vfs;
+ qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+ for (i = 0; i < qm_info->num_vports; i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
- qm_info->pf_wfq = 0;
- qm_info->pf_rl = 0;
qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+ qm_info->pf_rl = pf_rl;
+ qm_info->pf_wfq = pf_wfq;
return 0;
alloc_err:
DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
- kfree(qm_info->qm_pq_params);
- kfree(qm_info->qm_vport_params);
- kfree(qm_info->qm_port_params);
-
+ qed_qm_info_free(p_hwfn);
return -ENOMEM;
}
+/* This function reconfigures the QM PF on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to the runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate the init tool in the QM_PF stage
+ * 5. send an sdm_qm_cmd through the rbc interface to release the QM
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ bool b_rc;
+ int rc;
+
+ /* qm_info is allocated in qed_init_qm_info(), which was already called
+ * from qed_resc_alloc() or from a previous call of qed_qm_reconf().
+ * The allocated size may change with each init, so we free it before
+ * the next allocation.
+ */
+ qed_qm_info_free(p_hwfn);
+
+ /* initialize qed's qm data structure */
+ rc = qed_init_qm_info(p_hwfn, false);
+ if (rc)
+ return rc;
+
+ /* stop PF's qm queues */
+ spin_lock_bh(&qm_lock);
+ b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ spin_unlock_bh(&qm_lock);
+ if (!b_rc)
+ return -EINVAL;
+
+ /* clear the QM_PF runtime phase leftovers from previous init */
+ qed_init_clear_rt_data(p_hwfn);
+
+ /* prepare QM portion of runtime array */
+ qed_qm_init_pf(p_hwfn);
+
+ /* activate init tool on runtime array */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ return rc;
+
+ /* start PF's qm queues */
+ spin_lock_bh(&qm_lock);
+ b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ spin_unlock_bh(&qm_lock);
+ if (!b_rc)
+ return -EINVAL;
+
+ return 0;
+}
+
int qed_resc_alloc(struct qed_dev *cdev)
{
struct qed_consq *p_consq;
struct qed_eq *p_eq;
int i, rc = 0;
+ if (IS_VF(cdev))
+ return rc;
+
cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
if (!cdev->fw_data)
return -ENOMEM;
@@ -279,7 +395,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
/* Prepare and process QM requirements */
- rc = qed_init_qm_info(p_hwfn);
+ rc = qed_init_qm_info(p_hwfn, true);
if (rc)
goto alloc_err;
@@ -308,6 +424,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc)
goto alloc_err;
+ rc = qed_iov_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
/* EQ */
p_eq = qed_eq_alloc(p_hwfn, 256);
if (!p_eq) {
@@ -330,6 +450,14 @@ int qed_resc_alloc(struct qed_dev *cdev)
"Failed to allocate memory for dmae_info structure\n");
goto alloc_err;
}
+
+ /* DCBX initialization */
+ rc = qed_dcbx_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for dcbx structure\n");
+ goto alloc_err;
+ }
}
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
@@ -350,6 +478,9 @@ void qed_resc_setup(struct qed_dev *cdev)
{
int i;
+ if (IS_VF(cdev))
+ return;
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -365,14 +496,15 @@ void qed_resc_setup(struct qed_dev *cdev)
p_hwfn->mcp_info->mfw_mb_length);
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+ qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
}
}
#define FINAL_CLEANUP_POLL_CNT (100)
#define FINAL_CLEANUP_POLL_TIME (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 id)
+ struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
@@ -380,6 +512,9 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+ if (is_vf)
+ id += 0x10;
+
command |= X_FINAL_CLEANUP_AGG_INT <<
SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
@@ -453,7 +588,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
hw_mode |= 1 << MODE_ASIC;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ hw_mode |= 1 << MODE_100G;
+
p_hwfn->hw_info.hw_mode = hw_mode;
+
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+ "Configuring function for hw_mode: 0x%08x\n",
+ p_hwfn->hw_info.hw_mode);
}
/* Init run time data for all PFs on an engine. */
@@ -492,7 +634,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
+ u32 concrete_fid;
int rc = 0;
+ u8 vf_id;
qed_init_cau_rt_data(cdev);
@@ -542,6 +686,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, 0x20b4,
qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+ for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+ concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+ qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ }
+ /* pretend to original PF */
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+
return rc;
}
@@ -558,6 +710,7 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
+ struct qed_tunn_start_params *p_tunn,
int hw_mode,
bool b_hw_start,
enum qed_int_mode int_mode,
@@ -574,7 +727,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
/* Update rate limit once we'll actually have a link */
- p_hwfn->qm_info.pf_rl = 100;
+ p_hwfn->qm_info.pf_rl = 100000;
}
qed_cxt_hw_init_pf(p_hwfn);
@@ -603,7 +756,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
/* Cleanup chip from previous driver if such remains exist */
- rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+ rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
if (rc != 0)
return rc;
@@ -625,7 +778,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
/* send function start command */
- rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+ rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
+ allow_npar_tx_switch);
if (rc)
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
}
@@ -672,6 +826,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
}
int qed_hw_init(struct qed_dev *cdev,
+ struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
@@ -680,13 +835,25 @@ int qed_hw_init(struct qed_dev *cdev,
u32 load_code, param;
int rc, mfw_rc, i;
- rc = qed_init_fw_data(cdev, bin_fw_data);
- if (rc != 0)
- return rc;
+ if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
+ if (IS_PF(cdev)) {
+ rc = qed_init_fw_data(cdev, bin_fw_data);
+ if (rc != 0)
+ return rc;
+ }
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ p_hwfn->b_int_enabled = 1;
+ continue;
+ }
+
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
@@ -708,6 +875,11 @@ int qed_hw_init(struct qed_dev *cdev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
+ if (!qm_lock_init) {
+ spin_lock_init(&qm_lock);
+ qm_lock_init = true;
+ }
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -724,7 +896,7 @@ int qed_hw_init(struct qed_dev *cdev,
/* Fall into */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_hwfn->hw_info.hw_mode,
+ p_tunn, p_hwfn->hw_info.hw_mode,
b_hw_start, int_mode,
allow_npar_tx_switch);
break;
@@ -749,6 +921,20 @@ int qed_hw_init(struct qed_dev *cdev,
return mfw_rc;
}
+ /* send DCBX attention request command */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DCB,
+ "sending phony dcbx set command to trigger DCBx attention handling\n");
+ mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+ &load_code, &param);
+ if (mfw_rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send DCBX attention request\n");
+ return mfw_rc;
+ }
+
p_hwfn->hw_init_done = true;
}
@@ -811,6 +997,11 @@ int qed_hw_stop(struct qed_dev *cdev)
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+ if (IS_VF(cdev)) {
+ qed_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
+
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
@@ -842,15 +1033,16 @@ int qed_hw_stop(struct qed_dev *cdev)
usleep_range(1000, 2000);
}
- /* Disable DMAE in PXP - in CMT, this should only be done for
- * first hw-function, and only after all transactions have
- * stopped for all active hw-functions.
- */
- t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
- cdev->hwfns[0].p_main_ptt,
- false);
- if (t_rc != 0)
- rc = t_rc;
+ if (IS_PF(cdev)) {
+ /* Disable DMAE in PXP - in CMT, this should only be done for
+ * first hw-function, and only after all transactions have
+ * stopped for all active hw-functions.
+ */
+ t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+ cdev->hwfns[0].p_main_ptt, false);
+ if (t_rc != 0)
+ rc = t_rc;
+ }
return rc;
}
@@ -861,7 +1053,12 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ if (IS_VF(cdev)) {
+ qed_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFDOWN,
@@ -885,6 +1082,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
/* Re-open incoming traffic */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
@@ -914,6 +1114,13 @@ int qed_hw_reset(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_reset(p_hwfn);
+ if (rc)
+ return rc;
+ continue;
+ }
+
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
/* Check for incorrect states */
@@ -1009,13 +1216,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u8 num_funcs = p_hwfn->num_funcs_on_engine;
u32 *resc_num = p_hwfn->hw_info.resc_num;
struct qed_sb_cnt_info sb_cnt_info;
- int num_funcs, i;
-
- num_funcs = MAX_NUM_PFS_BB;
+ int i, max_vf_vlan_filters;
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+
+#ifdef CONFIG_QED_SRIOV
+ max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
+#else
+ max_vf_vlan_filters = 0;
+#endif
+
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
resc_num[QED_SB] = min_t(u32,
@@ -1220,6 +1433,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
+static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 reg_function_hide, tmp, eng_mask;
+ u8 num_funcs;
+
+ num_funcs = MAX_NUM_PFS_BB;
+
+ /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+ * in the other bits are selected.
+ * Bits 1-15 are for functions 1-15, respectively, and their value is
+ * '0' only for enabled functions (function 0 always exists and is
+ * enabled).
+ * In case of CMT, only the "even" functions are enabled, and thus the
+ * number of functions for both hwfns is learnt from the same bits.
+ */
+ reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+
+ if (reg_function_hide & 0x1) {
+ if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
+
+ /* Get the number of the enabled functions on the engine */
+ tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ num_funcs++;
+ tmp >>= 0x1;
+ }
+ }
+
+ p_hwfn->num_funcs_on_engine = num_funcs;
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_PROBE,
+ "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+ p_hwfn->rel_pf_id,
+ p_hwfn->abs_pf_id,
+ p_hwfn->num_funcs_on_engine);
+}
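
To see the decoding concretely: suppose a single-hwfn device with functions 2 and 4 hidden. Bit 0 of the register says the remaining bits are valid, function 0 is pre-counted, and the XOR-and-mask turns "enabled" into set bits that are then counted. The register value here is hypothetical:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* bit 0: bypass values valid; bits 2 and 4: functions hidden */
        uint32_t reg_function_hide = 0x1 | (1u << 2) | (1u << 4);
        uint32_t eng_mask = 0x5554;     /* even functions, minus PF0 */
        uint32_t tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
        int num_funcs = 1;              /* function 0 always exists */

        while (tmp) {
                if (tmp & 0x1)
                        num_funcs++;
                tmp >>= 1;
        }
        printf("num_funcs = %d\n", num_funcs);  /* prints 6 */
        return 0;
}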
+
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1228,6 +1486,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
u32 port_mode;
int rc;
+ /* Since all information is common, only the first hwfn should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_iov_hw_info(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
/* Read the port mode */
port_mode = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NW_PORT_MODE_BB_B0);
@@ -1271,6 +1536,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
+ qed_get_num_funcs(p_hwfn, p_ptt);
+
qed_hw_get_resc(p_hwfn);
return rc;
@@ -1336,6 +1603,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_hw_prepare(p_hwfn);
+
/* Validate that chip access is feasible */
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn,
@@ -1387,6 +1657,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
return rc;
err2:
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_iov_free_hw_info(p_hwfn->cdev);
qed_mcp_free(p_hwfn);
err1:
qed_hw_hwfn_free(p_hwfn);
@@ -1401,7 +1673,8 @@ int qed_hw_prepare(struct qed_dev *cdev,
int rc;
/* Store the precompiled init data ptrs */
- qed_init_iro_array(cdev);
+ if (IS_PF(cdev))
+ qed_init_iro_array(cdev);
/* Initialize the first hwfn - will learn number of hwfns */
rc = qed_hw_prepare_single(p_hwfn,
@@ -1433,9 +1706,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
 * initialized hwfn 0.
*/
if (rc) {
- qed_init_free(p_hwfn);
- qed_mcp_free(p_hwfn);
- qed_hw_hwfn_free(p_hwfn);
+ if (IS_PF(cdev)) {
+ qed_init_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ }
}
}
@@ -1449,10 +1724,17 @@ void qed_hw_remove(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ qed_vf_pf_release(p_hwfn);
+ continue;
+ }
+
qed_init_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
qed_mcp_free(p_hwfn);
}
+
+ qed_iov_free_hw_info(cdev);
}
int qed_chain_alloc(struct qed_dev *cdev,
@@ -1593,3 +1875,395 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
return 0;
}
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have
+ * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+ vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+ min_pf_rate;
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
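
A quick numeric check of the weight formula above, assuming QED_WFQ_UNIT is 100 (one-percent granularity) and a hypothetical 10 Gbps PF floor:

#include <stdio.h>

#define QED_WFQ_UNIT 100        /* assumed one-percent granularity */

int main(void)
{
        unsigned int min_pf_rate = 10000;               /* Mbps */
        unsigned int min_speed[] = { 2000, 500, 7500 }; /* per vport */
        unsigned int i;

        for (i = 0; i < sizeof(min_speed) / sizeof(min_speed[0]); i++)
                printf("vport %u: wfq weight %u\n", i,
                       min_speed[i] * QED_WFQ_UNIT / min_pf_rate);
        return 0;
}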
+
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+ u32 min_pf_rate)
+{
+ int i;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+ p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
+/* This function performs several validations for WFQ
+ * configuration and the required min rate for a given vport:
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate must not cause the rates of other vports [those not
+ * explicitly configured for WFQ] to drop below one percent of min_pf_rate.
+ * 3. total_req_min_rate [the sum of all vport min rates] must not exceed
+ * min_pf_rate.
+ */
+static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+ u16 vport_id, u32 req_rate,
+ u32 min_pf_rate)
+{
+ u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+ int non_requested_count = 0, req_count = 0, i, num_vports;
+
+ num_vports = p_hwfn->qm_info.num_vports;
+
+ /* Accounting for the vports which are configured for WFQ explicitly */
+ for (i = 0; i < num_vports; i++) {
+ u32 tmp_speed;
+
+ if ((i != vport_id) &&
+ p_hwfn->qm_info.wfq_data[i].configured) {
+ req_count++;
+ tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+ total_req_min_rate += tmp_speed;
+ }
+ }
+
+ /* Include current vport data as well */
+ req_count++;
+ total_req_min_rate += req_rate;
+ non_requested_count = num_vports - req_count;
+
+ if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
+ return -EINVAL;
+ }
+
+ if (num_vports > QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Number of vports is greater than %d\n",
+ QED_WFQ_UNIT);
+ return -EINVAL;
+ }
+
+ if (total_req_min_rate > min_pf_rate) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+ total_req_min_rate, min_pf_rate);
+ return -EINVAL;
+ }
+
+ total_left_rate = min_pf_rate - total_req_min_rate;
+
+ left_rate_per_vp = total_left_rate / non_requested_count;
+ if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ left_rate_per_vp, min_pf_rate);
+ return -EINVAL;
+ }
+
+ p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+ p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+ for (i = 0; i < num_vports; i++) {
+ if (p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+ }
+
+ return 0;
+}
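
Worked example of the three checks, under the same QED_WFQ_UNIT = 100 assumption: 8 vports, a 10000 Mbps PF floor, two vports already configured at 2000 and 1000 Mbps, and a new 3000 Mbps request:

#include <stdio.h>

#define QED_WFQ_UNIT 100        /* assumed */

int main(void)
{
        unsigned int min_pf_rate = 10000, req_rate = 3000;
        unsigned int configured[] = { 2000, 1000 };
        unsigned int num_vports = 8, req_count = 3, i;
        unsigned int total = req_rate, one_pct = min_pf_rate / QED_WFQ_UNIT;

        for (i = 0; i < 2; i++)
                total += configured[i];

        if (req_rate < one_pct ||               /* check 1 */
            total > min_pf_rate ||              /* check 3 */
            (min_pf_rate - total) / (num_vports - req_count) < one_pct)
                printf("rejected\n");           /* check 2 */
        else
                printf("accepted; %u Mbps left for %u other vports\n",
                       min_pf_rate - total, num_vports - req_count);
        return 0;
}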
+
+static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
+{
+ struct qed_mcp_link_state *p_link;
+ int rc = 0;
+
+ p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
+
+ if (!p_link->min_pf_rate) {
+ p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+ p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+ return rc;
+ }
+
+ rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+
+ if (rc == 0)
+ qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+ else
+ DP_NOTICE(p_hwfn,
+ "Validation failed while configuring min rate\n");
+
+ return rc;
+}
+
+static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ bool use_wfq = false;
+ int rc = 0;
+ u16 i;
+
+ /* Validate all pre configured vports for wfq */
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 rate;
+
+ if (!p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+ use_wfq = true;
+
+ rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "WFQ validation failed while configuring min rate\n");
+ break;
+ }
+ }
+
+ if (!rc && use_wfq)
+ qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ else
+ qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+ return rc;
+}
+
+/* Main API for qed clients to configure a vport min rate.
+ * vp_id - vport id in the PF range [0 - (total_num_vports_per_pf - 1)]
+ * rate - Speed in Mbps to be assigned to a given vport.
+ */
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
+{
+ int i, rc = -EINVAL;
+
+ /* Currently not supported; might change in the future */
+ if (cdev->num_hwfns > 1) {
+ DP_NOTICE(cdev,
+ "WFQ configuration is not supported for this device\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+
+ if (!rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+/* API to configure WFQ from mcp link change */
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+{
+ int i;
+
+ if (cdev->num_hwfns > 1) {
+ DP_VERBOSE(cdev,
+ NETIF_MSG_LINK,
+ "WFQ configuration is not supported for this device\n");
+ return;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ __qed_configure_vp_wfq_on_link_change(p_hwfn,
+ p_hwfn->p_dpc_ptt,
+ min_pf_rate);
+ }
+}
+
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 max_bw)
+{
+ int rc = 0;
+
+ p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+ if (!p_link->line_speed && (max_bw != 100))
+ return rc;
+
+ p_link->speed = (p_link->line_speed * max_bw) / 100;
+ p_hwfn->qm_info.pf_rl = p_link->speed;
+
+ /* Since the limiter also affects Tx-switched traffic, we don't want it
+ * to limit such traffic in case there's no actual limit.
+ * In that case, set the limit to an imaginary high boundary.
+ */
+ if (max_bw == 100)
+ p_hwfn->qm_info.pf_rl = 100000;
+
+ rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_hwfn->qm_info.pf_rl);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+
+ return rc;
+}
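
The rate limit is a straight percentage of the link speed, except that 100% is mapped to the 100000 sentinel so Tx-switched traffic is not throttled; a numeric sketch assuming a 25 Gbps link:

#include <stdio.h>

int main(void)
{
        unsigned int line_speed = 25000;        /* hypothetical, Mbps */
        unsigned int max_bw;

        for (max_bw = 25; max_bw <= 100; max_bw += 25)
                printf("max_bw %3u%% -> pf_rl %u Mbps%s\n", max_bw,
                       max_bw == 100 ? 100000 : line_speed * max_bw / 100,
                       max_bw == 100 ? " (no actual limit)" : "");
        return 0;
}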
+
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
+{
+ int i, rc = -EINVAL;
+
+ if (max_bw < 1 || max_bw > 100) {
+ DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+ struct qed_mcp_link_state *p_link;
+ struct qed_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 min_bw)
+{
+ int rc = 0;
+
+ p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+ p_hwfn->qm_info.pf_wfq = min_bw;
+
+ if (!p_link->line_speed)
+ return rc;
+
+ p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+ rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MIN bandwidth to be %d Mb/sec\n",
+ p_link->min_pf_rate);
+
+ return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
+{
+ int i, rc = -EINVAL;
+
+ if (min_bw < 1 || min_bw > 100) {
+ DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+ struct qed_mcp_link_state *p_link;
+ struct qed_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ if (p_link->min_pf_rate) {
+ u32 min_rate = p_link->min_pf_rate;
+
+ rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
+ p_ptt,
+ min_rate);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_link_state *p_link;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+
+ if (p_link->min_pf_rate)
+ qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+
+ memset(p_hwfn->qm_info.wfq_data, 0,
+ sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index d6c7ddf4f..dde364d6f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev);
* @brief qed_hw_init -
*
* @param cdev
+ * @param p_tunn
* @param b_hw_start
* @param int_mode - interrupt mode [msix, inta, etc.] to use.
* @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev);
* @return int
*/
int qed_hw_init(struct qed_dev *cdev,
+ struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
@@ -180,11 +182,15 @@ enum qed_dmae_address_type_t {
* used mostly to write a zeroed buffer to destination address
* using DMA
*/
-#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
-#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define QED_DMAE_FLAG_VF_SRC 0x00000002
+#define QED_DMAE_FLAG_VF_DST 0x00000004
+#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
struct qed_dmae_params {
- u32 flags; /* consists of QED_DMAE_FLAG_* values */
+ u32 flags; /* consists of QED_DMAE_FLAG_* values */
+ u8 src_vfid;
+ u8 dst_vfid;
};
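
How a caller might fill these params for a PF-to-VF copy before handing them to qed_dmae_host2host() below; a standalone sketch with the flag values copied from this header and a stand-in struct:

#include <stdio.h>
#include <stdint.h>

#define QED_DMAE_FLAG_VF_SRC 0x00000002
#define QED_DMAE_FLAG_VF_DST 0x00000004

struct dmae_params {    /* stand-in for struct qed_dmae_params */
        uint32_t flags;
        uint8_t src_vfid;
        uint8_t dst_vfid;
};

int main(void)
{
        /* hypothetical: source is PF memory, destination is VF 5 */
        struct dmae_params params = {
                .flags = QED_DMAE_FLAG_VF_DST,
                .dst_vfid = 5,
        };

        printf("flags 0x%x, dst_vfid %u\n",
               params.flags, params.dst_vfid);
        return 0;
}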
/**
@@ -207,6 +213,23 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u32 flags);
/**
+ * @brief qed_dmae_host2host - copy data from a source address
+ * to a destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param params
+ */
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params);
+
+/**
* @brief qed_chain_alloc - Allocate and initialize a chain
*
* @param p_hwfn
@@ -280,11 +303,11 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
* @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff the cleanup is performed for a VF.
*
* @return int
*/
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 id);
+ struct qed_ptt *p_ptt, u16 id, bool is_vf);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index a368f5e71..e29ed5a69 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -29,9 +29,9 @@ struct qed_ptt;
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
- COMMON_EVENT_RESERVED,
- COMMON_EVENT_RESERVED2,
- COMMON_EVENT_RESERVED3,
+ COMMON_EVENT_VF_START,
+ COMMON_EVENT_VF_STOP,
+ COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_RESERVED4,
COMMON_EVENT_RESERVED5,
COMMON_EVENT_RESERVED6,
@@ -44,9 +44,9 @@ enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED,
COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
- COMMON_RAMROD_RESERVED,
- COMMON_RAMROD_RESERVED2,
- COMMON_RAMROD_RESERVED3,
+ COMMON_RAMROD_VF_START,
+ COMMON_RAMROD_VF_STOP,
+ COMMON_RAMROD_PF_UPDATE,
COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
};
@@ -573,6 +573,14 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
+struct mstorm_non_trigger_vf_zone {
+ struct eth_mstorm_per_queue_stat eth_queue_stat;
+};
+
+struct mstorm_vf_zone {
+ struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
enum personality_type {
BAD_PERSONALITY_TYP,
PERSONALITY_RESERVED,
@@ -626,6 +634,59 @@ struct pf_start_ramrod_data {
u8 reserved0[4];
};
+/* Data for port update ramrod */
+struct protocol_dcb_data {
+ u8 dcb_enable_flag;
+ u8 dcb_priority;
+ u8 dcb_tc;
+ u8 reserved;
+};
+
+/* tunnel configuration */
+struct pf_update_tunnel_config {
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre;
+ u8 tx_enable_ipgre;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
+ __le16 reserved[3];
+};
+
+struct pf_update_ramrod_data {
+ u8 pf_id;
+ u8 update_eth_dcb_data_flag;
+ u8 update_fcoe_dcb_data_flag;
+ u8 update_iscsi_dcb_data_flag;
+ u8 update_roce_dcb_data_flag;
+ u8 update_mf_vlan_flag;
+ __le16 mf_vlan;
+ struct protocol_dcb_data eth_dcb_data;
+ struct protocol_dcb_data fcoe_dcb_data;
+ struct protocol_dcb_data iscsi_dcb_data;
+ struct protocol_dcb_data roce_dcb_data;
+ struct pf_update_tunnel_config tunnel_config;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+ TUNNEL_CLSS_MAC_VLAN = 0,
+ TUNNEL_CLSS_MAC_VNI,
+ TUNNEL_CLSS_INNER_MAC_VLAN,
+ TUNNEL_CLSS_INNER_MAC_VNI,
+ MAX_TUNNEL_CLSS
+};
+
enum ports_mode {
ENGX2_PORTX1 /* 2 engines x 1 port */,
ENGX2_PORTX2 /* 2 engines x 2 ports */,
@@ -635,6 +696,16 @@ enum ports_mode {
MAX_PORTS_MODE
};
+struct pstorm_non_trigger_vf_zone {
+ struct eth_pstorm_per_queue_stat eth_queue_stat;
+ struct regpair reserved[2];
+};
+
+struct pstorm_vf_zone {
+ struct pstorm_non_trigger_vf_zone non_trigger;
+ struct regpair reserved[7];
+};
+
/* Ramrod Header of SPQE */
struct ramrod_header {
__le32 cid /* Slowpath Connection CID */;
@@ -664,6 +735,36 @@ struct tstorm_per_port_stat {
struct regpair preroce_irregular_pkt;
};
+struct ustorm_non_trigger_vf_zone {
+ struct eth_ustorm_per_queue_stat eth_queue_stat;
+ struct regpair vf_pf_msg_addr;
+};
+
+struct ustorm_trigger_vf_zone {
+ u8 vf_pf_msg_valid;
+ u8 reserved[7];
+};
+
+struct ustorm_vf_zone {
+ struct ustorm_non_trigger_vf_zone non_trigger;
+ struct ustorm_trigger_vf_zone trigger;
+};
+
+struct vf_start_ramrod_data {
+ u8 vf_id;
+ u8 enable_flr_ack;
+ __le16 opaque_fid;
+ u8 personality;
+ u8 reserved[3];
+};
+
+struct vf_stop_ramrod_data {
+ u8 vf_id;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
+};
+
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@@ -990,7 +1091,7 @@ enum init_phases {
PHASE_ENGINE,
PHASE_PORT,
PHASE_PF,
- PHASE_RESERVED,
+ PHASE_VF,
PHASE_QM_PF,
MAX_INIT_PHASES
};
@@ -1603,6 +1704,19 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
u16 start_pq,
u16 num_pqs);
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool vxlan_enable);
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool eth_gre_enable,
+ bool ip_gre_enable);
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool eth_geneve_enable,
+ bool ip_geneve_enable);
+
/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
@@ -3586,6 +3700,7 @@ struct public_port {
#define MEDIA_DA_TWINAX 0x3
#define MEDIA_BASE_T 0x4
#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
#define MEDIA_KR 0xf0
#define MEDIA_NOT_PRESENT 0xff
@@ -3788,7 +3903,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_SET_LLDP 0x24000000
#define DRV_MSG_CODE_SET_DCBX 0x25000000
-
+#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
@@ -3808,6 +3923,7 @@ struct public_drv_mb {
#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+#define DRV_MSG_CODE_BIST_TEST 0x001e0000
#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
@@ -3865,6 +3981,18 @@ struct public_drv_mb {
#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
+
u32 fw_mb_header;
#define FW_MSG_CODE_MASK 0xffff0000
#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
@@ -5067,4 +5195,8 @@ struct hw_set_image {
struct hw_set_info hw_sets[1];
};
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 pf_id, u16 pf_wfq);
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a95a3e4b3..0ada7fdb9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -23,6 +23,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
#define QED_BAR_ACQUIRE_TIMEOUT 1000
@@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
quota = min_t(size_t, n - done,
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
- qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
- hw_offset = qed_ptt_get_bar_addr(p_ptt);
+ if (IS_PF(p_hwfn->cdev)) {
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = qed_ptt_get_bar_addr(p_ptt);
+ } else {
+ hw_offset = hw_addr + done;
+ }
dw_count = quota / 4;
host_addr = (u32 *)((u8 *)addr + done);
@@ -338,14 +343,25 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
*(u32 *)&p_ptt->pxp.pretend);
}
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ u32 concrete_fid = 0;
+
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+ return concrete_fid;
+}
+
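As a sketch of what the helper above builds: the concrete FID is three fields OR-ed into one 32-bit word. The ex_concrete_fid() helper and its shifts/masks below are hypothetical placeholders; the authoritative layout is given by the PXP_CONCRETE_FID_* HSI definitions.

/* Illustrative bit packing only; real field positions come from
 * the PXP_CONCRETE_FID_* definitions in the HSI headers.
 */
static u32 ex_concrete_fid(u8 pfid, u8 vfid)
{
        u32 fid = 0;

        fid |= pfid & 0xf;       /* parent PF id (placeholder mask) */
        fid |= (u32)vfid << 8;   /* VF index (placeholder shift) */
        fid |= 1u << 16;         /* VF-valid flag (placeholder bit) */

        return fid;
}
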
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct qed_dmae_params *p_params)
{
+ u16 opcode_b = 0;
u32 opcode = 0;
- u16 opcodeB = 0;
/* Whether the source is the PCIe or the GRC.
* 0- The source is the PCIe
@@ -387,14 +403,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
DMAE_CMD_DST_ADDR_RESET_SHIFT);
- opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
- DMAE_CMD_SRC_VF_ID_SHIFT);
+ /* SRC/DST VFID: all 1's means PF; otherwise the VF id */
+ if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
+ opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT;
+ }
- opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
- DMAE_CMD_DST_VF_ID_SHIFT);
+ if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
+ opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ }
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
- p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}
u32 qed_dmae_idx_to_go_cmd(u8 idx)
@@ -742,6 +768,28 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc;
}
+int
+qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params)
+{
+ int rc;
+
+ mutex_lock(&(p_hwfn->dmae_info.mutex));
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ dest_addr,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ size_in_dwords, p_params);
+
+ mutex_unlock(&(p_hwfn->dmae_info.mutex));
+
+ return rc;
+}
+
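For orientation, a hypothetical caller of the new host-to-host wrapper could look like the sketch below; ex_dmae_copy() is not part of the patch, the DMA addresses are assumed to come from dma_alloc_coherent(), and the size argument is in 32-bit dwords as the signature implies.

/* Sketch: copy a 4 KB coherent buffer through the DMAE engine. With no
 * QED_DMAE_FLAG_VF_* flags set, both endpoints are treated as PF memory.
 */
static int ex_dmae_copy(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                        dma_addr_t src, dma_addr_t dst)
{
        struct qed_dmae_params params;

        memset(&params, 0, sizeof(params));

        return qed_dmae_host2host(p_hwfn, p_ptt, src, dst,
                                  4096 / 4, &params);
}
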
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
enum protocol_type proto,
union qed_qm_pq_params *p_params)
@@ -765,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
break;
case PROTOCOLID_ETH:
pq_id = p_params->eth.tc;
+ if (p_params->eth.is_vf)
+ pq_id += p_hwfn->qm_info.vf_queues_offset +
+ p_params->eth.vf_id;
break;
default:
pq_id = 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index e56d43379..4367363ad 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -221,6 +221,16 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief qed_vfid_to_concrete - build a concrete FID for a
+ * given VF ID
+ *
+ * @param p_hwfn
+ * @param vfid
+ */
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
+
+/**
* @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
* this is declared here since other files will require it.
* @param idx
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index f55ebdc3c..e8a3b9da5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -712,6 +712,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u8 pf_id, u16 pf_wfq)
+{
+ u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+ return 0;
+}
+
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 pf_id,
@@ -732,6 +747,31 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS],
+ u16 vport_wfq)
+{
+ u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ u8 tc;
+
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+ return -1;
+ }
+
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ u16 vport_pq_id = first_tx_pq_id[tc];
+
+ if (vport_pq_id != QM_INVALID_PQ_ID)
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
+ inc_val);
+ }
+
+ return 0;
+}
+
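Taken together, the two new WFQ helpers let a caller weight a PF and its vports in one pass. A minimal sketch, with arbitrary example weights and a hypothetical ex_config_wfq() wrapper:

/* Sketch: weight PF 0 at 50 and one vport at 25. first_tx_pq_id[] is
 * assumed to be filled by the QM PF init flow, with QM_INVALID_PQ_ID
 * marking TCs that have no PQ.
 */
static int ex_config_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                         u16 first_tx_pq_id[NUM_OF_TCS])
{
        int rc;

        rc = qed_init_pf_wfq(p_hwfn, p_ptt, 0, 50);
        if (rc)
                return rc;

        return qed_init_vport_wfq(p_hwfn, p_ptt, first_tx_pq_id, 25);
}
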
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 vport_id,
@@ -788,3 +828,130 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
+
+static void
+qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
+{
+ if (enable)
+ set_bit(bit, var);
+ else
+ clear_bit(bit, var);
+}
+
+#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 dest_port)
+{
+ qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool vxlan_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+ vxlan_enable ? 1 : 0);
+}
+
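The VXLAN, GRE and GENEVE enable helpers all repeat the same read-modify-write shape; factored into a generic sketch (ex_rmw_enable_bit() is hypothetical, and it assumes visibility of the static helper above):

/* Generic read-modify-write of a single tunnel-enable bit. */
static void ex_rmw_enable_bit(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                              u32 reg_addr, u8 shift, bool enable)
{
        unsigned long reg_val = qed_rd(p_hwfn, p_ptt, reg_addr);

        qed_set_tunnel_type_enable_bit(&reg_val, shift, enable);
        qed_wr(p_hwfn, p_ptt, reg_addr, reg_val);
}
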
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+ shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+ eth_gre_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+ ip_gre_enable ? 1 : 0);
+}
+
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 dest_port)
+{
+ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_geneve_enable,
+ bool ip_geneve_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
+
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+ eth_geneve_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
+
+ /* NGE component version: non-zero when either GENEVE mode is enabled */
+ reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+
+ /* EDPM with GENEVE tunnel is not supported on BB_B0 */
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ return;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+ eth_geneve_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ ip_geneve_enable ? 1 : 0);
+}
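A hypothetical bring-up of GENEVE offload would pair the two helpers, for instance with the IANA-assigned UDP port; ex_enable_geneve() below is a sketch, not part of the patch.

/* Sketch: configure and enable GENEVE classification on UDP port 6081. */
static void ex_enable_geneve(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081);
        qed_set_geneve_enable(p_hwfn, p_ptt, true, true);
}
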
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 3269b3610..d358c3bb1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -18,6 +18,7 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500
@@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_rt_data *rt_data = &p_hwfn->rt_data;
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
GFP_KERNEL);
if (!rt_data->b_valid)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 2017b0121..09a6ad3d2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -26,6 +26,8 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
struct qed_pi_info {
qed_int_comp_cb_t comp_cb;
@@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u32 sb_offset;
u32 pi_offset;
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
sb_offset = igu_sb_id * PIS_PER_SB;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
@@ -2542,8 +2547,9 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
- sb_info->igu_sb_id, 0, 0);
+ if (IS_PF(p_hwfn->cdev))
+ qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
}
/**
@@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
/* Assuming continuous set of IGU SBs dedicated for given PF */
if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- else
+ else if (IS_PF(p_hwfn->cdev))
igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ else
+ igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
(sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
@@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
/* The igu address will hold the absolute address that needs to be
* written to for a specific status block
*/
- sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_IGU_CMD +
- (sb_info->igu_sb_id << 3);
+ if (IS_PF(p_hwfn->cdev)) {
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ (sb_info->igu_sb_id << 3);
+ } else {
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_IGU +
+ ((IGU_CMD_INT_ACK_BASE +
+ sb_info->igu_sb_id) << 3);
+ }
sb_info->flags |= QED_SB_INFO_INIT;
@@ -2783,24 +2798,20 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
{
p_hwfn->b_int_enabled = 0;
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 sb_id,
- bool cleanup_set,
- u16 opaque_fid
- )
+static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id, bool cleanup_set, u16 opaque_fid)
{
+ u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
- u32 data = 0;
- u32 cmd_ctrl = 0;
- u32 val = 0;
- u32 sb_bit = 0;
- u32 sb_bit_addr = 0;
/* Set the data field */
SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
@@ -2845,11 +2856,9 @@ void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 sb_id,
- u16 opaque,
- bool b_set)
+ u32 sb_id, u16 opaque, bool b_set)
{
- int pi;
+ int pi, i;
/* Set */
if (b_set)
@@ -2858,6 +2867,22 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
/* Clear */
qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+ /* Wait for the IGU SB to cleanup */
+ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+ u32 val;
+
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4));
+ if (val & (1 << (sb_id % 32)))
+ usleep_range(10, 20);
+ else
+ break;
+ }
+ if (i == IGU_CLEANUP_SLEEP_LENGTH)
+ DP_NOTICE(p_hwfn,
+ "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+ sb_id);
+
/* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++)
qed_wr(p_hwfn, p_ptt,
@@ -2866,13 +2891,11 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- bool b_set,
- bool b_slowpath)
+ bool b_set, bool b_slowpath)
{
u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
- u32 sb_id = 0;
- u32 val = 0;
+ u32 sb_id = 0, val = 0;
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
@@ -2888,14 +2911,14 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.opaque_fid,
b_set);
- if (b_slowpath) {
- sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU cleaning slowpath SB [%d]\n", sb_id);
- qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
- p_hwfn->hw_info.opaque_fid,
- b_set);
- }
+ if (!b_slowpath)
+ return;
+
+ sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning slowpath SB [%d]\n", sb_id);
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid, b_set);
}
static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
@@ -2935,9 +2958,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
+ u32 val, min_vf = 0, max_vf = 0;
+ u16 sb_id, last_iov_sb_id = 0;
struct qed_igu_block *blk;
- u32 val;
- u16 sb_id;
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
@@ -2947,12 +2970,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
p_igu_info = p_hwfn->hw_info.p_igu_info;
- /* Initialize base sb / sb cnt for PFs */
+ /* Initialize base sb / sb cnt for PFs and VFs */
p_igu_info->igu_base_sb = 0xffff;
p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff;
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ min_vf = p_iov->first_vf_in_pf;
+ max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
+ }
+
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2986,14 +3016,43 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
(p_igu_info->igu_sb_cnt)++;
}
}
+ } else {
+ if ((blk->function_id >= min_vf) &&
+ (blk->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ if (p_igu_info->igu_base_sb_iov == 0xffff) {
+ p_igu_info->igu_base_sb_iov = sb_id;
+ } else if (last_iov_sb_id != sb_id - 1) {
+ if (!val) {
+ DP_VERBOSE(p_hwfn->cdev,
+ NETIF_MSG_INTR,
+ "First uninitialized IGU CAM entry at index 0x%04x\n",
+ sb_id);
+ } else {
+ DP_NOTICE(p_hwfn->cdev,
+ "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
+ p_hwfn->rel_pf_id,
+ last_iov_sb_id,
+ sb_id);
+ }
+ break;
+ }
+ blk->status |= QED_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ last_iov_sb_id = sb_id;
+ }
}
}
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb,
- p_igu_info->igu_sb_cnt,
- p_igu_info->igu_dsb_id);
+ p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_base_sb_iov,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_sb_cnt_iov,
+ p_igu_info->igu_dsb_id);
if (p_igu_info->igu_base_sb == 0xffff ||
p_igu_info->igu_dsb_id == 0xffff ||
@@ -3116,6 +3175,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
p_sb_cnt_info->sb_free_blk = info->free_blks;
}
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Determine origin of SB id */
+ if ((sb_id >= p_info->igu_base_sb) &&
+ (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
+ return sb_id - p_info->igu_base_sb;
+ } else if ((sb_id >= p_info->igu_base_sb_iov) &&
+ (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
+ return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+ } else {
+ DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
+ return 0;
+ }
+}
+
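A worked example of the mapping, using illustrative IGU values rather than real CAM contents:

/* Assume igu_base_sb = 16, igu_sb_cnt = 8, igu_base_sb_iov = 64:
 *   PF-owned SB 18 -> queue 18 - 16 = 2
 *   VF-owned SB 65 -> queue (65 - 64) + 8 = 9
 * Any sb_id outside both ranges logs a notice and yields queue 0.
 */
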
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
int i;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index c57f2e680..20b468637 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -20,6 +20,12 @@
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
/* Igu control commands
*/
@@ -292,26 +298,8 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
* @param p_hwfn
* @param p_ptt
* @param sb_id - igu status block id
- * @param cleanup_set - set(1) / clear(0)
- * @param opaque_fid - the function for which to perform
- * cleanup, for example a PF on behalf of
- * its VFs.
- */
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 sb_id,
- bool cleanup_set,
- u16 opaque_fid);
-
-/**
- * @brief Status block cleanup. Should be called for each status
- * block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param sb_id - igu status block id
* @param opaque - opaque fid of the sb owner.
- * @param cleanup_set - set(1) / clear(0)
+ * @param b_set - set(1) / clear(0)
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -365,6 +353,16 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief - Returns an Rx queue index appropriate for usage with a given SB.
+ *
+ * @param p_hwfn
+ * @param sb_id - absolute index of SB
+ *
+ * @return index of Rx queue
+ */
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 3f35c6ca9..aada4c7e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -31,137 +31,25 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
-enum qed_rss_caps {
- QED_RSS_IPV4 = 0x1,
- QED_RSS_IPV6 = 0x2,
- QED_RSS_IPV4_TCP = 0x4,
- QED_RSS_IPV6_TCP = 0x8,
- QED_RSS_IPV4_UDP = 0x10,
- QED_RSS_IPV6_UDP = 0x20,
-};
-
-/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
-#define QED_RSS_IND_TABLE_SIZE 128
-#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
-
-struct qed_rss_params {
- u8 update_rss_config;
- u8 rss_enable;
- u8 rss_eng_id;
- u8 update_rss_capabilities;
- u8 update_rss_ind_table;
- u8 update_rss_key;
- u8 rss_caps;
- u8 rss_table_size_log;
- u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
- u32 rss_key[QED_RSS_KEY_SIZE];
-};
-
-enum qed_filter_opcode {
- QED_FILTER_ADD,
- QED_FILTER_REMOVE,
- QED_FILTER_MOVE,
- QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
- QED_FILTER_FLUSH, /* Removes all filters */
-};
-
-enum qed_filter_ucast_type {
- QED_FILTER_MAC,
- QED_FILTER_VLAN,
- QED_FILTER_MAC_VLAN,
- QED_FILTER_INNER_MAC,
- QED_FILTER_INNER_VLAN,
- QED_FILTER_INNER_PAIR,
- QED_FILTER_INNER_MAC_VNI_PAIR,
- QED_FILTER_MAC_VNI_PAIR,
- QED_FILTER_VNI,
-};
-
-struct qed_filter_ucast {
- enum qed_filter_opcode opcode;
- enum qed_filter_ucast_type type;
- u8 is_rx_filter;
- u8 is_tx_filter;
- u8 vport_to_add_to;
- u8 vport_to_remove_from;
- unsigned char mac[ETH_ALEN];
- u8 assert_on_error;
- u16 vlan;
- u32 vni;
-};
-
-struct qed_filter_mcast {
- /* MOVE is not supported for multicast */
- enum qed_filter_opcode opcode;
- u8 vport_to_add_to;
- u8 vport_to_remove_from;
- u8 num_mc_addrs;
-#define QED_MAX_MC_ADDRS 64
- unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
-};
-
-struct qed_filter_accept_flags {
- u8 update_rx_mode_config;
- u8 update_tx_mode_config;
- u8 rx_accept_filter;
- u8 tx_accept_filter;
-#define QED_ACCEPT_NONE 0x01
-#define QED_ACCEPT_UCAST_MATCHED 0x02
-#define QED_ACCEPT_UCAST_UNMATCHED 0x04
-#define QED_ACCEPT_MCAST_MATCHED 0x08
-#define QED_ACCEPT_MCAST_UNMATCHED 0x10
-#define QED_ACCEPT_BCAST 0x20
-};
-
-struct qed_sp_vport_update_params {
- u16 opaque_fid;
- u8 vport_id;
- u8 update_vport_active_rx_flg;
- u8 vport_active_rx_flg;
- u8 update_vport_active_tx_flg;
- u8 vport_active_tx_flg;
- u8 update_approx_mcast_flg;
- u8 update_accept_any_vlan_flg;
- u8 accept_any_vlan;
- unsigned long bins[8];
- struct qed_rss_params *rss_params;
- struct qed_filter_accept_flags accept_flags;
-};
-
-enum qed_tpa_mode {
- QED_TPA_MODE_NONE,
- QED_TPA_MODE_UNUSED,
- QED_TPA_MODE_GRO,
- QED_TPA_MODE_MAX
-};
-
-struct qed_sp_vport_start_params {
- enum qed_tpa_mode tpa_mode;
- bool remove_inner_vlan;
- bool drop_ttl0;
- u8 max_buffers_per_cqe;
- u32 concrete_fid;
- u16 opaque_fid;
- u8 vport_id;
- u16 mtu;
-};
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
-static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_start_params *p_params)
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
{
struct vport_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
+ u8 abs_vport_id = 0;
int rc = -EINVAL;
u16 rx_mode = 0;
- u8 abs_vport_id = 0;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
@@ -184,6 +72,7 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->mtu = cpu_to_le16(p_params->mtu);
p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+ p_ramrod->untagged = p_params->only_untagged;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -211,6 +100,8 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
break;
}
+ p_ramrod->tx_switching_en = p_params->tx_switching;
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
p_params->concrete_fid);
@@ -218,6 +109,21 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
+{
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+ p_params->mtu,
+ p_params->remove_inner_vlan,
+ p_params->tpa_mode,
+ p_params->max_buffers_per_cqe,
+ p_params->only_untagged);
+ }
+
+ return qed_sp_eth_vport_start(p_hwfn, p_params);
+}
+
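A minimal PF-side caller only needs a handful of the start parameters; the hypothetical ex_start_vport() below mirrors the fill done later by qed_start_vport(), with illustrative values.

/* Sketch: start vport 0 with a 1500-byte MTU and TPA disabled. */
static int ex_start_vport(struct qed_hwfn *p_hwfn)
{
        struct qed_sp_vport_start_params params;

        memset(&params, 0, sizeof(params));
        params.tpa_mode = QED_TPA_MODE_NONE;
        params.mtu = 1500;
        params.vport_id = 0;
        params.only_untagged = true;
        params.opaque_fid = p_hwfn->hw_info.opaque_fid;
        params.concrete_fid = p_hwfn->hw_info.concrete_fid;

        return qed_sp_vport_start(p_hwfn, &params);
}
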
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
@@ -342,10 +248,6 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
- SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
- (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
- !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
@@ -363,6 +265,38 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
}
static void
+qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_sge_tpa_params *p_params)
+{
+ struct eth_vport_tpa_param *p_tpa;
+
+ if (!p_params) {
+ p_ramrod->common.update_tpa_param_flg = 0;
+ p_ramrod->common.update_tpa_en_flg = 0;
+ return;
+ }
+
+ p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+ p_tpa = &p_ramrod->tpa_param;
+ p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+ p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+ p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+ p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+ p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+ p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+ p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+ p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+ p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+ p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+ p_tpa->tpa_max_size = p_params->tpa_max_size;
+ p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+ p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
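For reference, a caller enabling IPv4 aggregation through the new SGE-TPA path might fill the params as below; ex_fill_sge_tpa() is hypothetical and the numeric limits are illustrative, not firmware-mandated.

/* Sketch: request IPv4 TPA with example aggregation limits. */
static void ex_fill_sge_tpa(struct qed_sge_tpa_params *p_sge)
{
        memset(p_sge, 0, sizeof(*p_sge));
        p_sge->update_tpa_en_flg = 1;
        p_sge->tpa_ipv4_en_flg = 1;
        p_sge->update_tpa_param_flg = 1;
        p_sge->tpa_max_aggs_num = 8;
        p_sge->tpa_max_size = 0xffff;
}
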
+static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
struct qed_sp_vport_update_params *p_params)
@@ -383,20 +317,24 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
}
}
-static int
-qed_sp_vport_update(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_update_params *p_params,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct qed_rss_params *p_rss_params = p_params->rss_params;
struct vport_update_ramrod_data_cmn *p_cmn;
struct qed_sp_init_data init_data;
struct vport_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
- u8 abs_vport_id = 0;
+ u8 abs_vport_id = 0, val;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev)) {
+ rc = qed_vf_pf_vport_update(p_hwfn, p_params);
+ return rc;
+ }
+
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
return rc;
@@ -425,6 +363,27 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->accept_any_vlan = p_params->accept_any_vlan;
p_cmn->update_accept_any_vlan_flg =
p_params->update_accept_any_vlan_flg;
+
+ p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
+ val = p_params->update_inner_vlan_removal_flg;
+ p_cmn->update_inner_vlan_removal_en_flg = val;
+
+ p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
+ val = p_params->update_default_vlan_enable_flg;
+ p_cmn->update_default_vlan_en_flg = val;
+
+ p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
+ p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
+
+ p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
+
+ p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+ p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+
+ p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
+ val = p_params->update_anti_spoofing_en_flg;
+ p_ramrod->common.update_anti_spoofing_en_flg = val;
+
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
/* Return spq entry which is taken in qed_sp_init_request()*/
@@ -436,12 +395,11 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u8 vport_id)
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
struct vport_stop_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
@@ -449,6 +407,9 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_vport_stop(p_hwfn);
+
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
if (rc != 0)
return rc;
@@ -470,13 +431,26 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int
+qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
+ struct qed_filter_accept_flags *p_accept_flags)
+{
+ struct qed_sp_vport_update_params s_params;
+
+ memset(&s_params, 0, sizeof(s_params));
+ memcpy(&s_params.accept_flags, p_accept_flags,
+ sizeof(struct qed_filter_accept_flags));
+
+ return qed_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
static int qed_filter_accept_cmd(struct qed_dev *cdev,
u8 vport,
struct qed_filter_accept_flags accept_flags,
u8 update_accept_any_vlan,
u8 accept_any_vlan,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct qed_sp_vport_update_params vport_update_params;
int i, rc;
@@ -493,6 +467,13 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
+ if (rc)
+ return rc;
+ continue;
+ }
+
rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data);
if (rc != 0) {
@@ -527,16 +508,14 @@ static int qed_sp_release_queue_cid(
return 0;
}
-static int
-qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *params,
- u8 stats_id,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size)
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -605,8 +584,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void __iomem **pp_prod)
+ u16 cqe_pbl_size, void __iomem **pp_prod)
{
struct qed_hw_cid_data *p_rx_cid;
u64 init_prod_val = 0;
@@ -614,6 +592,16 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u8 abs_stats_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_rxq_start(p_hwfn,
+ params->queue_id,
+ params->sb,
+ params->sb_idx,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size, pp_prod);
+ }
+
rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
if (rc != 0)
return rc;
@@ -656,10 +644,59 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only,
- bool cqe_completion)
+int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct rx_queue_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ struct qed_hw_cid_data *p_rx_cid;
+ u16 qid, abs_rx_q_id = 0;
+ int rc = -EINVAL;
+ u8 i;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ for (i = 0; i < num_rxqs; i++) {
+ qid = rx_queue_id + i;
+ p_rx_cid = &p_hwfn->p_rx_cids[qid];
+
+ /* Get SPQ entry */
+ init_data.cid = p_rx_cid->cid;
+ init_data.opaque_fid = p_rx_cid->opaque_fid;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_update;
+
+ qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+ qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = complete_cqe_flg;
+ p_ramrod->complete_event_flg = complete_event_flg;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
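Invocation is straightforward; a hypothetical ex_update_rxqs() asking for a CQE-completed update of the first two queues would be:

/* Sketch: update rx queues 0..1, completing to the CQE ring. */
static int ex_update_rxqs(struct qed_hwfn *p_hwfn)
{
        return qed_sp_eth_rx_queues_update(p_hwfn, 0, 2,
                                           1,   /* complete_cqe_flg */
                                           0,   /* complete_event_flg */
                                           QED_SPQ_MODE_EBLOCK, NULL);
}
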
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only, bool cqe_completion)
{
struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
@@ -668,6 +705,9 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 abs_rx_q_id = 0;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_rx_cid->cid;
@@ -703,15 +743,14 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}
-static int
-qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *p_params,
- u8 stats_id,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union qed_qm_pq_params *p_pq_params)
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params)
{
struct tx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -765,14 +804,21 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
- u16 pbl_size,
- void __iomem **pp_doorbell)
+ u16 pbl_size, void __iomem **pp_doorbell)
{
struct qed_hw_cid_data *p_tx_cid;
union qed_qm_pq_params pq_params;
u8 abs_stats_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_txq_start(p_hwfn,
+ p_params->queue_id,
+ p_params->sb,
+ p_params->sb_idx,
+ pbl_addr, pbl_size, pp_doorbell);
+ }
+
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
if (rc)
return rc;
@@ -813,14 +859,16 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
- u16 tx_queue_id)
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
{
struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_tx_cid->cid;
@@ -1016,11 +1064,11 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
return 0;
}
-static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- struct qed_filter_ucast *p_filter_cmd,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct vport_filter_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1118,7 +1166,7 @@ static inline u32 qed_crc32c_le(u32 seed,
return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}
-static u8 qed_mcast_bin_from_mac(u8 *mac)
+u8 qed_mcast_bin_from_mac(u8 *mac)
{
u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
mac, ETH_ALEN);
@@ -1201,11 +1249,10 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static int
-qed_filter_mcast_cmd(struct qed_dev *cdev,
- struct qed_filter_mcast *p_filter_cmd,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+static int qed_filter_mcast_cmd(struct qed_dev *cdev,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
int rc = 0;
int i;
@@ -1221,8 +1268,10 @@ qed_filter_mcast_cmd(struct qed_dev *cdev,
u16 opaque_fid;
- if (rc != 0)
- break;
+ if (IS_VF(cdev)) {
+ qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+ continue;
+ }
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1247,8 +1296,10 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u16 opaque_fid;
- if (rc != 0)
- break;
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+ continue;
+ }
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1257,6 +1308,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
p_filter_cmd,
comp_mode,
p_comp_data);
+ if (rc != 0)
+ break;
}
return rc;
@@ -1265,12 +1318,19 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_PSDM_RAM +
- PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+ }
}
static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
@@ -1285,32 +1345,15 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&pstats, 0, sizeof(pstats));
- qed_memcpy_from(p_hwfn, p_ptt, &pstats,
- pstats_addr, pstats_len);
-
- p_stats->tx_ucast_bytes +=
- HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts +=
- HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts +=
- HILO_64_REGPAIR(pstats.error_drop_pkts);
-}
-
-static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
- u32 *p_addr,
- u32 *p_len)
-{
- *p_addr = BAR0_MAP_REG_TSDM_RAM +
- TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
- *p_len = sizeof(struct tstorm_per_port_stat);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+ p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1318,14 +1361,23 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
struct qed_eth_stats *p_stats,
u16 statistics_bin)
{
- u32 tstats_addr = 0, tstats_len = 0;
struct tstorm_per_port_stat tstats;
+ u32 tstats_addr, tstats_len;
+
+ if (IS_PF(p_hwfn->cdev)) {
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ tstats_len = sizeof(struct tstorm_per_port_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
- __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
+ tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+ tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+ }
memset(&tstats, 0, sizeof(tstats));
- qed_memcpy_from(p_hwfn, p_ptt, &tstats,
- tstats_addr, tstats_len);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
p_stats->mftag_filter_discards +=
HILO_64_REGPAIR(tstats.mftag_filter_discard);
@@ -1335,12 +1387,19 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_USDM_RAM +
- USTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+ *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+ }
}
static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
@@ -1355,31 +1414,31 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&ustats, 0, sizeof(ustats));
- qed_memcpy_from(p_hwfn, p_ptt, &ustats,
- ustats_addr, ustats_len);
-
- p_stats->rx_ucast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+ p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_MSDM_RAM +
- MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+ }
}
static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
@@ -1394,21 +1453,17 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&mstats, 0, sizeof(mstats));
- qed_memcpy_from(p_hwfn, p_ptt, &mstats,
- mstats_addr, mstats_len);
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards +=
- HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
p_stats->packet_too_big_discard +=
HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard +=
- HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
p_stats->tpa_coalesced_pkts +=
HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
p_stats->tpa_coalesced_events +=
HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num +=
- HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
p_stats->tpa_coalesced_bytes +=
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
@@ -1428,16 +1483,16 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
sizeof(port_stats));
p_stats->rx_64_byte_packets += port_stats.pmm.r64;
- p_stats->rx_127_byte_packets += port_stats.pmm.r127;
- p_stats->rx_255_byte_packets += port_stats.pmm.r255;
- p_stats->rx_511_byte_packets += port_stats.pmm.r511;
- p_stats->rx_1023_byte_packets += port_stats.pmm.r1023;
- p_stats->rx_1518_byte_packets += port_stats.pmm.r1518;
- p_stats->rx_1522_byte_packets += port_stats.pmm.r1522;
- p_stats->rx_2047_byte_packets += port_stats.pmm.r2047;
- p_stats->rx_4095_byte_packets += port_stats.pmm.r4095;
- p_stats->rx_9216_byte_packets += port_stats.pmm.r9216;
- p_stats->rx_16383_byte_packets += port_stats.pmm.r16383;
+ p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127;
+ p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255;
+ p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511;
+ p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023;
+ p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518;
+ p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522;
+ p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047;
+ p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095;
+ p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216;
+ p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383;
p_stats->rx_crc_errors += port_stats.pmm.rfcs;
p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
p_stats->rx_pause_frames += port_stats.pmm.rxpf;
@@ -1481,44 +1536,49 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *stats,
- u16 statistics_bin)
+ u16 statistics_bin, bool b_get_port_stats)
{
__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
- if (p_hwfn->mcp_info)
+ if (b_get_port_stats && p_hwfn->mcp_info)
__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats)
{
- u8 fw_vport = 0;
- int i;
+ u8 fw_vport = 0;
+ int i;
memset(stats, 0, sizeof(*stats));
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct qed_ptt *p_ptt;
-
- /* The main vport index is relative first */
- if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
- DP_ERR(p_hwfn, "No vport available!\n");
- continue;
+ struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+ : NULL;
+
+ if (IS_PF(cdev)) {
+ /* The main vport index is relative first */
+ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ goto out;
+ }
}
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
+ if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
- __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
+ __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+ IS_PF(cdev) ? true : false);
- qed_ptt_release(p_hwfn, p_ptt);
+out:
+ if (IS_PF(cdev) && p_ptt)
+ qed_ptt_release(p_hwfn, p_ptt);
}
}
@@ -1552,10 +1612,11 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
struct eth_mstorm_per_queue_stat mstats;
struct eth_ustorm_per_queue_stat ustats;
struct eth_pstorm_per_queue_stat pstats;
- struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+ : NULL;
u32 addr = 0, len = 0;
- if (!p_ptt) {
+ if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
@@ -1572,7 +1633,8 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
- qed_ptt_release(p_hwfn, p_ptt);
+ if (IS_PF(cdev))
+ qed_ptt_release(p_hwfn, p_ptt);
}
/* PORT statistics are not necessarily reset, so we need to
@@ -1593,32 +1655,61 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_tc = 1;
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
- for_each_hwfn(cdev, i)
- info->num_queues += FEAT_NUM(&cdev->hwfns[i],
- QED_PF_L2_QUE);
- if (cdev->int_params.fp_msix_cnt)
- info->num_queues = min_t(u8, info->num_queues,
- cdev->int_params.fp_msix_cnt);
+ if (IS_PF(cdev)) {
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i)
+ info->num_queues +=
+ FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+ if (cdev->int_params.fp_msix_cnt)
+ info->num_queues =
+ min_t(u8, info->num_queues,
+ cdev->int_params.fp_msix_cnt);
+ } else {
+ info->num_queues = cdev->num_hwfns;
+ }
+
+ info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
+ ether_addr_copy(info->port_mac,
+ cdev->hwfns[0].hw_info.hw_mac_addr);
} else {
- info->num_queues = cdev->num_hwfns;
- }
+ qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
+ if (cdev->num_hwfns > 1) {
+ u8 queues = 0;
- info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
- ether_addr_copy(info->port_mac,
- cdev->hwfns[0].hw_info.hw_mac_addr);
+ qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
+ info->num_queues += queues;
+ }
+
+ qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
+ &info->num_vlan_filters);
+ qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+ }
qed_fill_dev_info(cdev, &info->common);
+ if (IS_VF(cdev))
+ memset(info->common.hw_mac, 0, ETH_ALEN);
+
return 0;
}
static void qed_register_eth_ops(struct qed_dev *cdev,
- struct qed_eth_cb_ops *ops,
- void *cookie)
+ struct qed_eth_cb_ops *ops, void *cookie)
+{
+ cdev->protocol_ops.eth = ops;
+ cdev->ops_cookie = cookie;
+
+ /* For VFs, start reading the bulletin board published by the PF */
+ if (IS_VF(cdev))
+ qed_vf_start_iov_wq(cdev);
+}
+
+static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
- cdev->protocol_ops.eth = ops;
- cdev->ops_cookie = cookie;
+ if (IS_PF(cdev))
+ return true;
+
+ return qed_vf_check_mac(&cdev->hwfns[0], mac);
}
static int qed_start_vport(struct qed_dev *cdev,
@@ -1633,6 +1724,7 @@ static int qed_start_vport(struct qed_dev *cdev,
start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
QED_TPA_MODE_NONE;
start.remove_inner_vlan = params->remove_inner_vlan;
+ start.only_untagged = true; /* accept only untagged traffic */
start.drop_ttl0 = params->drop_ttl0;
start.opaque_fid = p_hwfn->hw_info.opaque_fid;
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
@@ -1653,7 +1745,8 @@ static int qed_start_vport(struct qed_dev *cdev,
start.vport_id, start.mtu);
}
- qed_reset_vport_stats(cdev);
+ if (params->clear_stats)
+ qed_reset_vport_stats(cdev);
return 0;
}
@@ -1699,6 +1792,8 @@ static int qed_update_vport(struct qed_dev *cdev,
params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
+ sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
+ sp_params.tx_switching_flg = params->tx_switching_flg;
sp_params.accept_any_vlan = params->accept_any_vlan;
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
@@ -1744,9 +1839,7 @@ static int qed_update_vport(struct qed_dev *cdev,
sp_rss_params.update_rss_capabilities = 1;
sp_rss_params.update_rss_ind_table = 1;
sp_rss_params.update_rss_key = 1;
- sp_rss_params.rss_caps = QED_RSS_IPV4 |
- QED_RSS_IPV6 |
- QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ sp_rss_params.rss_caps = params->rss_params.rss_caps;
sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
memcpy(sp_rss_params.rss_ind_table,
params->rss_params.rss_ind_table,
@@ -1899,6 +1992,39 @@ static int qed_stop_txq(struct qed_dev *cdev,
return 0;
}
+static int qed_tunn_configure(struct qed_dev *cdev,
+ struct qed_tunn_params *tunn_params)
+{
+ struct qed_tunn_update_params tunn_info;
+ int i, rc;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ if (tunn_params->update_vxlan_port == 1) {
+ tunn_info.update_vxlan_udp_port = 1;
+ tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+ }
+
+ if (tunn_params->update_geneve_port == 1) {
+ tunn_info.update_geneve_udp_port = 1;
+ tunn_info.geneve_udp_port = tunn_params->geneve_port;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+ QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
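From above the driver core, the same path is normally reached through the new .tunn_config op; a direct, hypothetical caller would look like:

/* Sketch: move the VXLAN UDP port to 4789, the IANA default. */
static int ex_set_vxlan_port(struct qed_dev *cdev)
{
        struct qed_tunn_params params;

        memset(&params, 0, sizeof(params));
        params.update_vxlan_port = 1;
        params.vxlan_port = 4789;

        return qed_tunn_configure(cdev, &params);
}
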
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
enum qed_filter_rx_mode_type type)
{
@@ -2026,10 +2152,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
+#ifdef CONFIG_QED_SRIOV
+extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+#endif
+
static const struct qed_eth_ops qed_eth_ops_pass = {
.common = &qed_common_ops_pass,
+#ifdef CONFIG_QED_SRIOV
+ .iov = &qed_iov_ops_pass,
+#endif
.fill_dev_info = &qed_fill_eth_dev_info,
.register_ops = &qed_register_eth_ops,
+ .check_mac = &qed_check_mac,
.vport_start = &qed_start_vport,
.vport_stop = &qed_stop_vport,
.vport_update = &qed_update_vport,
@@ -2041,16 +2175,11 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.fastpath_stop = &qed_fastpath_stop,
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
+ .tunn_config = &qed_tunn_configure,
};
-const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+const struct qed_eth_ops *qed_get_eth_ops(void)
{
- if (version != QED_ETH_INTERFACE_VERSION) {
- pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
- version, QED_ETH_INTERFACE_VERSION);
- return NULL;
- }
-
return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
new file mode 100644
index 000000000..002114543
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -0,0 +1,239 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _QED_L2_H
+#define _QED_L2_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_eth_if.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_sp.h"
+
+struct qed_sge_tpa_params {
+ u8 max_buffers_per_cqe;
+
+ u8 update_tpa_en_flg;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+
+ u8 update_tpa_param_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+};
+
+enum qed_filter_opcode {
+ QED_FILTER_ADD,
+ QED_FILTER_REMOVE,
+ QED_FILTER_MOVE,
+ QED_FILTER_REPLACE, /* Delete all MACs and add a new one instead */
+ QED_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+ QED_FILTER_MAC,
+ QED_FILTER_VLAN,
+ QED_FILTER_MAC_VLAN,
+ QED_FILTER_INNER_MAC,
+ QED_FILTER_INNER_VLAN,
+ QED_FILTER_INNER_PAIR,
+ QED_FILTER_INNER_MAC_VNI_PAIR,
+ QED_FILTER_MAC_VNI_PAIR,
+ QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+ enum qed_filter_opcode opcode;
+ enum qed_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct qed_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum qed_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS 64
+ unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only, bool cqe_completion);
+
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
+
+enum qed_tpa_mode {
+ QED_TPA_MODE_NONE,
+ QED_TPA_MODE_UNUSED,
+ QED_TPA_MODE_GRO,
+ QED_TPA_MODE_MAX
+};
+
+struct qed_sp_vport_start_params {
+ enum qed_tpa_mode tpa_mode;
+ bool remove_inner_vlan;
+ bool tx_switching;
+ bool only_untagged;
+ bool drop_ttl0;
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id;
+ u16 mtu;
+};
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
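+
+/* Editor's sketch (assumption, not part of this patch): minimal VPort start
+ * parameters; example_vport_start() is a hypothetical helper, and unset
+ * fields keep their zeroed defaults.
+ */
+#if 0 /* example only */
+static int example_vport_start(struct qed_hwfn *p_hwfn, u8 vport_id, u16 mtu)
+{
+ struct qed_sp_vport_start_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.tpa_mode = QED_TPA_MODE_NONE;
+ params.drop_ttl0 = true;
+ params.concrete_fid = p_hwfn->hw_info.concrete_fid;
+ params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ params.vport_id = vport_id;
+ params.mtu = mtu;
+
+ return qed_sp_eth_vport_start(p_hwfn, &params);
+}
+#endif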
+
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+ u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
+
+struct qed_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define QED_ACCEPT_NONE 0x01
+#define QED_ACCEPT_UCAST_MATCHED 0x02
+#define QED_ACCEPT_UCAST_UNMATCHED 0x04
+#define QED_ACCEPT_MCAST_MATCHED 0x08
+#define QED_ACCEPT_MCAST_UNMATCHED 0x10
+#define QED_ACCEPT_BCAST 0x20
+};
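+
+/* Editor's sketch (assumption, not part of this patch): the accept-filter
+ * flags above combine with bitwise OR; this hypothetical helper composes a
+ * promiscuous RX configuration.
+ */
+#if 0 /* example only */
+static void example_rx_promisc(struct qed_filter_accept_flags *p_flags)
+{
+ p_flags->update_rx_mode_config = 1;
+ p_flags->rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
+ QED_ACCEPT_UCAST_UNMATCHED |
+ QED_ACCEPT_MCAST_MATCHED |
+ QED_ACCEPT_MCAST_UNMATCHED |
+ QED_ACCEPT_BCAST;
+}
+#endif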
+
+struct qed_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_inner_vlan_removal_flg;
+ u8 inner_vlan_removal_flg;
+ u8 silent_vlan_removal_flg;
+ u8 update_default_vlan_enable_flg;
+ u8 default_vlan_enable_flg;
+ u8 update_default_vlan_flg;
+ u16 default_vlan;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ unsigned long bins[8];
+ struct qed_rss_params *rss_params;
+ struct qed_filter_accept_flags accept_flags;
+ struct qed_sge_tpa_params *sge_tpa_params;
+};
+
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return int
+ */
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
+
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_rx_eth_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note At the moment this is only used by non-Linux VFs.
+ *
+ * @param p_hwfn
+ * @param rx_queue_id RX Queue ID
+ * @param num_rxqs Allows updating multiple RX queues,
+ * from rx_queue_id to
+ * (rx_queue_id + num_rxqs)
+ * @param complete_cqe_flg Post completion to the CQE Ring if set
+ * @param complete_event_flg Post completion to the Event Ring if set
+ *
+ * @return int
+ */
+
+int
+qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
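+
+/* Editor's sketch (assumption, not part of this patch): re-activating a
+ * contiguous range of RX queues with the completion posted to the event
+ * ring; example_rxq_range_update() is a hypothetical helper.
+ */
+#if 0 /* example only */
+static int example_rxq_range_update(struct qed_hwfn *p_hwfn,
+ u16 first_rxq, u8 num_rxqs)
+{
+ return qed_sp_eth_rx_queues_update(p_hwfn, first_rxq, num_rxqs,
+ 0 /* complete_cqe_flg */,
+ 1 /* complete_event_flg */,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+#endif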
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params);
+
+u8 qed_mcast_bin_from_mac(u8 *mac);
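+
+/* Editor's sketch (assumption, not part of this patch): hashing each
+ * multicast MAC into one of the approximation bins and setting the matching
+ * bit in qed_sp_vport_update_params::bins before posting a vport update;
+ * example_fill_mcast_bins() is a hypothetical helper.
+ */
+#if 0 /* example only */
+static void example_fill_mcast_bins(struct qed_sp_vport_update_params *p,
+ unsigned char (*macs)[ETH_ALEN], int num)
+{
+ int i;
+
+ p->update_approx_mcast_flg = 1;
+ for (i = 0; i < num; i++)
+ __set_bit(qed_mcast_bin_from_mac(macs[i]), p->bins);
+}
+#endif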
+
+#endif /* _QED_L2_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 53c9e9aa1..a41c6b559 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -24,10 +24,12 @@
#include <linux/qed/qed_if.h>
#include "qed.h"
+#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_hw.h"
+#include "qed_selftest.h"
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -124,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err1;
}
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
DP_NOTICE(cdev, "No memory region found in bar #2\n");
rc = -EIO;
goto err1;
@@ -156,7 +158,7 @@ static int qed_init_pci(struct qed_dev *cdev,
}
cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (cdev->pci_params.pm_cap == 0)
+ if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
DP_NOTICE(cdev, "Cannot find power management capability\n");
rc = qed_set_coherency_mask(cdev);
@@ -174,12 +176,14 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err2;
}
- cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
- cdev->db_size = pci_resource_len(cdev->pdev, 2);
- cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
- if (!cdev->doorbells) {
- DP_NOTICE(cdev, "Cannot map doorbell space\n");
- return -ENOMEM;
+ if (IS_PF(cdev)) {
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ return -ENOMEM;
+ }
}
return 0;
@@ -206,20 +210,33 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
- dev_info->fw_major = FW_MAJOR_VERSION;
- dev_info->fw_minor = FW_MINOR_VERSION;
- dev_info->fw_rev = FW_REVISION_VERSION;
- dev_info->fw_eng = FW_ENGINEERING_VERSION;
- dev_info->mf_mode = cdev->mf_mode;
+ if (IS_PF(cdev)) {
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ dev_info->mf_mode = cdev->mf_mode;
+ dev_info->tx_switching = true;
+ } else {
+ qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
+ &dev_info->fw_minor, &dev_info->fw_rev,
+ &dev_info->fw_eng);
+ }
- qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+ if (IS_PF(cdev)) {
+ ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (ptt) {
+ qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->mfw_rev, NULL);
- ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (ptt) {
- qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
- &dev_info->flash_size);
+ qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->flash_size);
- qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ }
+ } else {
+ qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
+ &dev_info->mfw_rev, NULL);
}
return 0;
@@ -256,9 +273,7 @@ static int qed_set_power_state(struct qed_dev *cdev,
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
- enum qed_protocol protocol,
- u32 dp_module,
- u8 dp_level)
+ struct qed_probe_params *params)
{
struct qed_dev *cdev;
int rc;
@@ -267,9 +282,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev)
goto err0;
- cdev->protocol = protocol;
+ cdev->protocol = params->protocol;
+
+ if (params->is_vf)
+ cdev->b_is_vf = true;
- qed_init_dp(cdev, dp_module, dp_level);
+ qed_init_dp(cdev, params->dp_module, params->dp_level);
rc = qed_init_pci(cdev, pdev);
if (rc) {
@@ -395,15 +413,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
/* Fallthrough */
case QED_INT_MODE_MSI:
- rc = pci_enable_msi(cdev->pdev);
- if (!rc) {
- int_params->out.int_mode = QED_INT_MODE_MSI;
- goto out;
- }
+ if (cdev->num_hwfns == 1) {
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
- DP_NOTICE(cdev, "Failed to enable MSI\n");
- if (force_mode)
- goto out;
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ }
/* Fallthrough */
case QED_INT_MODE_INTA:
@@ -663,6 +683,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
return 0;
}
+static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
+{
+ int rc;
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+ cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
+
+ qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
+ &cdev->int_params.in.num_vectors);
+ if (cdev->num_hwfns > 1) {
+ u8 vectors = 0;
+
+ qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
+ cdev->int_params.in.num_vectors += vectors;
+ }
+
+ /* We want a minimum of one fastpath vector per vf hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
+
+ rc = qed_set_int_mode(cdev, true);
+ if (rc)
+ return rc;
+
+ cdev->int_params.fp_msix_base = 0;
+ cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
+
+ return 0;
+}
+
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
@@ -744,39 +793,62 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
+ struct qed_tunn_start_params tunn_info;
struct qed_mcp_drv_version drv_version;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
- int rc;
+ int rc = -EINVAL;
- rc = reject_firmware(&cdev->firmware, QED_FW_FILE_NAME,
- &cdev->pdev->dev);
- if (rc) {
- DP_NOTICE(cdev,
- "Failed to find fw file - /lib/firmware/%s\n",
- QED_FW_FILE_NAME);
+ if (qed_iov_wq_start(cdev))
goto err;
+
+ if (IS_PF(cdev)) {
+ rc = reject_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+ &cdev->pdev->dev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to find fw file - /lib/firmware/%s\n",
+ QED_FW_FILE_NAME);
+ goto err;
+ }
}
rc = qed_nic_setup(cdev);
if (rc)
goto err;
- rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ if (IS_PF(cdev))
+ rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ else
+ rc = qed_slowpath_vf_setup_int(cdev);
if (rc)
goto err1;
- /* Allocate stream for unzipping */
- rc = qed_alloc_stream_mem(cdev);
- if (rc) {
- DP_NOTICE(cdev, "Failed to allocate stream memory\n");
- goto err2;
+ if (IS_PF(cdev)) {
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(cdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ goto err2;
+ }
+
+ data = cdev->firmware->data;
}
- /* Start the slowpath */
- data = cdev->firmware->data;
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
+ 1 << QED_MODE_L2GRE_TUNN |
+ 1 << QED_MODE_IPGRE_TUNN |
+ 1 << QED_MODE_L2GENEVE_TUNN |
+ 1 << QED_MODE_IPGENEVE_TUNN;
- rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+ tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
+
+ /* Start the slowpath */
+ rc = qed_hw_init(cdev, &tunn_info, true,
+ cdev->int_params.out.int_mode,
true, data);
if (rc)
goto err2;
@@ -784,18 +856,20 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
- hwfn = QED_LEADING_HWFN(cdev);
- drv_version.version = (params->drv_major << 24) |
- (params->drv_minor << 16) |
- (params->drv_rev << 8) |
- (params->drv_eng);
- strlcpy(drv_version.name, params->name,
- MCP_DRV_VER_STR_SIZE - 4);
- rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
- &drv_version);
- if (rc) {
- DP_NOTICE(cdev, "Failed sending drv version command\n");
- return rc;
+ if (IS_PF(cdev)) {
+ hwfn = QED_LEADING_HWFN(cdev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) |
+ (params->drv_eng);
+ strlcpy(drv_version.name, params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed sending drv version command\n");
+ return rc;
+ }
}
qed_reset_vport_stats(cdev);
@@ -804,13 +878,17 @@ static int qed_slowpath_start(struct qed_dev *cdev,
err2:
qed_hw_timers_stop_all(cdev);
- qed_slowpath_irq_free(cdev);
+ if (IS_PF(cdev))
+ qed_slowpath_irq_free(cdev);
qed_free_stream_mem(cdev);
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
err:
- release_firmware(cdev->firmware);
+ if (IS_PF(cdev))
+ release_firmware(cdev->firmware);
+
+ qed_iov_wq_stop(cdev, false);
return rc;
}
@@ -820,15 +898,21 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
- qed_free_stream_mem(cdev);
+ if (IS_PF(cdev)) {
+ qed_free_stream_mem(cdev);
+ qed_sriov_disable(cdev, true);
- qed_nic_stop(cdev);
- qed_slowpath_irq_free(cdev);
+ qed_nic_stop(cdev);
+ qed_slowpath_irq_free(cdev);
+ }
qed_disable_msix(cdev);
qed_nic_reset(cdev);
- release_firmware(cdev->firmware);
+ qed_iov_wq_stop(cdev, true);
+
+ if (IS_PF(cdev))
+ release_firmware(cdev->firmware);
return 0;
}
@@ -902,6 +986,11 @@ static u32 qed_sb_release(struct qed_dev *cdev,
return rc;
}
+static bool qed_can_link_change(struct qed_dev *cdev)
+{
+ return true;
+}
+
static int qed_set_link(struct qed_dev *cdev,
struct qed_link_params *params)
{
@@ -913,6 +1002,9 @@ static int qed_set_link(struct qed_dev *cdev,
if (!cdev)
return -ENODEV;
+ if (IS_VF(cdev))
+ return 0;
+
/* The link should be set only once per PF */
hwfn = &cdev->hwfns[0];
@@ -944,6 +1036,39 @@ static int qed_set_link(struct qed_dev *cdev,
}
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
link_params->speed.forced_speed = params->forced_speed;
+ if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+ if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ link_params->pause.autoneg = true;
+ else
+ link_params->pause.autoneg = false;
+ if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ link_params->pause.forced_rx = true;
+ else
+ link_params->pause.forced_rx = false;
+ if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ link_params->pause.forced_tx = true;
+ else
+ link_params->pause.forced_tx = false;
+ }
+ if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
+ switch (params->loopback_mode) {
+ case QED_LINK_LOOPBACK_INT_PHY:
+ link_params->loopback_mode = PMM_LOOPBACK_INT_PHY;
+ break;
+ case QED_LINK_LOOPBACK_EXT_PHY:
+ link_params->loopback_mode = PMM_LOOPBACK_EXT_PHY;
+ break;
+ case QED_LINK_LOOPBACK_EXT:
+ link_params->loopback_mode = PMM_LOOPBACK_EXT;
+ break;
+ case QED_LINK_LOOPBACK_MAC:
+ link_params->loopback_mode = PMM_LOOPBACK_MAC;
+ break;
+ default:
+ link_params->loopback_mode = PMM_LOOPBACK_NONE;
+ break;
+ }
+ }
rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
@@ -960,6 +1085,7 @@ static int qed_get_port_type(u32 media_type)
case MEDIA_SFPP_10G_FIBER:
case MEDIA_SFP_1G_FIBER:
case MEDIA_XFP_FIBER:
+ case MEDIA_MODULE_FIBER:
case MEDIA_KR:
port_type = PORT_FIBRE;
break;
@@ -980,6 +1106,39 @@ static int qed_get_port_type(u32 media_type)
return port_type;
}
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *link_caps)
+{
+ void *p;
+
+ if (!IS_PF(hwfn->cdev)) {
+ qed_vf_get_link_params(hwfn, params);
+ qed_vf_get_link_state(hwfn, link);
+ qed_vf_get_link_caps(hwfn, link_caps);
+
+ return 0;
+ }
+
+ p = qed_mcp_get_link_params(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(params, p, sizeof(*params));
+
+ p = qed_mcp_get_link_state(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link, p, sizeof(*link));
+
+ p = qed_mcp_get_link_capabilities(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link_caps, p, sizeof(*link_caps));
+
+ return 0;
+}
+
static void qed_fill_link(struct qed_hwfn *hwfn,
struct qed_link_output *if_link)
{
@@ -991,10 +1150,10 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
- memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
- sizeof(link_caps));
+ if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+ dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+ return;
+ }
/* Set the link parameters to pass to protocol driver */
if (link.link_up)
@@ -1096,7 +1255,12 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
static void qed_get_current_link(struct qed_dev *cdev,
struct qed_link_output *if_link)
{
+ int i;
+
qed_fill_link(&cdev->hwfns[0], if_link);
+
+ for_each_hwfn(cdev, i)
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn)
@@ -1106,6 +1270,7 @@ void qed_link_update(struct qed_hwfn *hwfn)
struct qed_link_output if_link;
qed_fill_link(hwfn, &if_link);
+ qed_inform_vf_link_state(hwfn);
if (IS_LEAD_HWFN(hwfn) && cookie)
op->link_update(cookie, &if_link);
@@ -1117,6 +1282,9 @@ static int qed_drain(struct qed_dev *cdev)
struct qed_ptt *ptt;
int i, rc;
+ if (IS_VF(cdev))
+ return 0;
+
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
ptt = qed_ptt_acquire(hwfn);
@@ -1150,7 +1318,15 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
+struct qed_selftest_ops qed_selftest_ops_pass = {
+ .selftest_memory = &qed_selftest_memory,
+ .selftest_interrupt = &qed_selftest_interrupt,
+ .selftest_register = &qed_selftest_register,
+ .selftest_clock = &qed_selftest_clock,
+};
+
const struct qed_common_ops qed_common_ops_pass = {
+ .selftest = &qed_selftest_ops_pass,
.probe = &qed_probe,
.remove = &qed_remove,
.set_power_state = &qed_set_power_state,
@@ -1164,6 +1340,7 @@ const struct qed_common_ops qed_common_ops_pass = {
.sb_release = &qed_sb_release,
.simd_handler_config = &qed_simd_handler_config,
.simd_handler_clean = &qed_simd_handler_clean,
+ .can_link_change = &qed_can_link_change,
.set_link = &qed_set_link,
.get_link = &qed_get_current_link,
.drain = &qed_drain,
@@ -1172,14 +1349,3 @@ const struct qed_common_ops qed_common_ops_pass = {
.chain_free = &qed_chain_free,
.set_led = &qed_set_led,
};
-
-u32 qed_get_protocol_version(enum qed_protocol protocol)
-{
- switch (protocol) {
- case QED_PROTOCOL_ETH:
- return QED_ETH_INTERFACE_VERSION;
- default:
- return 0;
- }
-}
-EXPORT_SYMBOL(qed_get_protocol_version);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index b89c9a8e1..118236179 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -15,10 +15,13 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
#define CHIP_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
@@ -440,6 +443,75 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+ QED_PATH_ID(p_hwfn));
+ u32 disabled_vfs[VF_MAX_STATIC / 32];
+ int i;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
+ mfw_path_offsize, path_addr);
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+ disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
+ path_addr +
+ offsetof(struct public_path,
+ mcp_vf_disabled) +
+ sizeof(u32) * i);
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+ "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+ }
+
+ if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+}
+
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+ MCP_PF_ID(p_hwfn));
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ int rc;
+ int i;
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+ "Acking VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+ memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
+ return -EBUSY;
+ }
+
+ /* Clear the ACK bits */
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ qed_wr(p_hwfn, p_ptt,
+ func_addr +
+ offsetof(struct public_func, drv_ack_vf_disabled) +
+ i * sizeof(u32), 0);
+
+ return rc;
+}
+
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -472,6 +544,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
bool b_reset)
{
struct qed_mcp_link_state *p_link;
+ u8 max_bw, min_bw;
u32 status = 0;
p_link = &p_hwfn->mcp_info->link_output;
@@ -527,17 +600,20 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
p_link->speed = 0;
}
- /* Correct speed according to bandwidth allocation */
- if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
- p_link->speed = p_link->speed *
- p_hwfn->mcp_info->func_info.bandwidth_max /
- 100;
- qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
- p_link->speed);
- DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
- "Configured MAX bandwidth to be %08x Mb/sec\n",
- p_link->speed);
- }
+ if (p_link->link_up && p_link->speed)
+ p_link->line_speed = p_link->speed;
+ else
+ p_link->line_speed = 0;
+
+ max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+ min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+ /* Max bandwidth configuration */
+ __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
+
+ /* Min bandwidth configuration */
+ __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
+ qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
p_link->an_complete = !!(status &
@@ -648,6 +724,77 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ p_info->bandwidth_min = (p_shmem_info->config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = (p_shmem_info->config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data),
+ QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+ return size;
+}
+
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_function_info *p_info;
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
+ qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
+
+ /* Acknowledge the MFW */
+ qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
+}
+
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -676,9 +823,27 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
break;
+ case MFW_DRV_MSG_VF_DISABLED:
+ qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_REMOTE_LLDP_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_REMOTE_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_OPERATIONAL_MIB);
+ break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_BW_UPDATE:
+ qed_mcp_update_bw(p_hwfn, p_ptt);
+ break;
default:
DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
@@ -709,26 +874,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
- u32 *p_mfw_ver)
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
- struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
- struct qed_ptt *p_ptt;
u32 global_offsize;
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return -EBUSY;
+ if (IS_VF(p_hwfn->cdev)) {
+ if (p_hwfn->vf_iov_info) {
+ struct pfvf_acquire_resp_tlv *p_resp;
+
+ p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+ *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+ return 0;
+ } else {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF requested MFW version prior to ACQUIRE\n");
+ return -EINVAL;
+ }
+ }
global_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
- public_base,
+ SECTION_OFFSIZE_ADDR(p_hwfn->
+ mcp_info->public_base,
PUBLIC_GLOBAL));
- *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
- SECTION_ADDR(global_offsize, 0) +
- offsetof(struct public_global, mfw_ver));
-
- qed_ptt_release(p_hwfn, p_ptt);
+ *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global, mfw_ver));
+
+ if (p_running_bundle_id != NULL) {
+ *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global,
+ running_bundle_id));
+ }
return 0;
}
@@ -739,6 +920,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
+ if (IS_VF(cdev))
+ return -EINVAL;
+
if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
return -EBUSY;
@@ -758,28 +942,6 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
return 0;
}
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- memset(p_data, 0, sizeof(*p_data));
-
- size = min_t(u32, sizeof(*p_data),
- QED_SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
-
- return size;
-}
-
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
struct public_func *p_info,
@@ -818,26 +980,7 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
-
- info->bandwidth_min = (shmem_info.config &
- FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
- if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
- DP_INFO(p_hwfn,
- "bandwidth minimum out of bounds [%02x]. Set to 1\n",
- info->bandwidth_min);
- info->bandwidth_min = 1;
- }
-
- info->bandwidth_max = (shmem_info.config &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
- DP_INFO(p_hwfn,
- "bandwidth maximum out of bounds [%02x]. Set to 100\n",
- info->bandwidth_max);
- info->bandwidth_max = 100;
- }
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
if (shmem_info.mac_upper || shmem_info.mac_lower) {
info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
@@ -914,6 +1057,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
{
u32 flash_size;
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
@@ -924,6 +1070,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+ u32 resp = 0, param = 0, rc_param = 0;
+ int rc;
+
+ /* Only the leader can configure MSI-X; it must also account for CMT */
+ if (!IS_LEAD_HWFN(p_hwfn))
+ return 0;
+ num *= p_hwfn->cdev->num_hwfns;
+
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+ &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
+ rc = -EINVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+ num, vf_id);
+ }
+
+ return rc;
+}
+
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -938,9 +1115,10 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
p_drv_version = &union_data.drv_version;
p_drv_version->version = p_ver->version;
+
for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
val = cpu_to_be32(p_ver->name[i]);
- *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+ *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
memset(&mb_params, 0, sizeof(mb_params));
@@ -979,3 +1157,45 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
+
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 drv_mb_param = 0, rsp, param;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 drv_mb_param, rsp, param;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = -EAGAIN;
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 50917a213..6dd59eb7f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -40,7 +40,15 @@ struct qed_mcp_link_capabilities {
struct qed_mcp_link_state {
bool link_up;
- u32 speed; /* In Mb/s */
+ u32 min_pf_rate;
+
+ /* Actual link speed in Mb/s */
+ u32 line_speed;
+
+ /* PF max speed in Mb/s, deduced from line_speed
+ * according to PF max bandwidth configuration.
+ */
+ u32 speed;
bool full_duplex;
bool an;
@@ -141,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
/**
* @brief Get the management firmware version value
*
- * @param cdev - qed dev pointer
- * @param mfw_ver - mfw version value
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mfw_ver - mfw version value
+ * @param p_running_bundle_id - image ID in NVRAM; optional.
*
- * @return int - 0 - operation was successul.
+ * @return int - 0 - operation was successful.
*/
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
- u32 *mfw_ver);
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
* @brief Get media type value of the port.
@@ -237,6 +248,28 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_led_mode mode);
+/**
+ * @brief Bist register test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Bist clock test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@@ -360,6 +393,18 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @return int - 0 upon success.
+ */
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack);
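+
+/* Editor's sketch (assumption, not part of this patch): acking every FLR-ed
+ * VF in the engine after cleanup, using the same bitmap layout as the
+ * mcp_vf_disabled shmem field; example_ack_all_flr() is hypothetical.
+ */
+#if 0 /* example only */
+static int example_ack_all_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ack[VF_MAX_STATIC / 32];
+
+ memset(ack, 0xff, sizeof(ack)); /* ack the full engine bitmap */
+ return qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack);
+}
+#endif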
+
+/**
* @brief - calls during init to read shmem of all function-related info.
*
* @param p_hwfn
@@ -389,4 +434,27 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
*/
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute VF ID inside the engine
+ * @param num - number of SBs to request
+ *
+ * @return int
+ */
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 max_bw);
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 min_bw);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index c15b1622e..3a6c506f0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -39,6 +39,10 @@
0x2aae04UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
0x2aa16cUL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
+ 0x2aa118UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE \
+ 0x2a0800UL
#define BAR0_MAP_REG_MSDM_RAM \
0x1d00000UL
#define BAR0_MAP_REG_USDM_RAM \
@@ -77,6 +81,8 @@
0x2f2eb0UL
#define DORQ_REG_PF_DB_ENABLE \
0x100508UL
+#define DORQ_REG_VF_USAGE_CNT \
+ 0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
#define TCFC_REG_STRONG_ENABLE_PF \
@@ -111,6 +117,8 @@
0x009778UL
#define MISCS_REG_CHIP_METAL \
0x009774UL
+#define MISCS_REG_FUNCTION_HIDE \
+ 0x0096f0UL
#define BRB_REG_HEADER_SIZE \
0x340804UL
#define BTB_REG_HEADER_SIZE \
@@ -119,6 +127,8 @@
0x1c0708UL
#define CCFC_REG_ACTIVITY_COUNTER \
0x2e8800UL
+#define CCFC_REG_STRONG_ENABLE_VF \
+ 0x2e070cUL
#define CDU_REG_CID_ADDR_PARAMS \
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
@@ -161,6 +171,10 @@
0x040200UL
#define PBF_REG_INIT \
0xd80000UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \
+ 0xd806c8UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \
+ 0xd806ccUL
#define PTU_REG_ATC_INIT_ARRAY \
0x560000UL
#define PCM_REG_INIT \
@@ -385,6 +399,8 @@
0x1d0000UL
#define IGU_REG_PF_CONFIGURATION \
0x180800UL
+#define IGU_REG_VF_CONFIGURATION \
+ 0x180804UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL
#define MISC_REG_AEU_AFTER_INVERT_1_IGU \
@@ -411,6 +427,10 @@
0x1 << 0)
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
+ 0x180408UL
+#define IGU_REG_WRITE_DONE_PENDING \
+ 0x180900UL
#define MISCS_REG_GENERIC_POR_0 \
0x0096d4UL
#define MCP_REG_NVM_CFG4 \
@@ -427,4 +447,37 @@
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
0x2aae64UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
+#define PRS_REG_VXLAN_PORT 0x1f0738UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE (0x1 << 1)
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2)
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
+
+#define NIG_REG_VXLAN_PORT 0x50105cUL
+#define PBF_REG_VXLAN_PORT 0xd80518UL
+#define PBF_REG_NGE_PORT 0xd8051cUL
+#define PRS_REG_NGE_PORT 0x1f086cUL
+#define NIG_REG_NGE_PORT 0x508b38UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+
+#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
+#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
+#define NIG_REG_NGE_COMP_VER 0x508b30UL
+#define PBF_REG_NGE_COMP_VER 0xd80524UL
+#define PRS_REG_NGE_COMP_VER 0x1f0878UL
+
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
new file mode 100644
index 000000000..a342bfe42
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -0,0 +1,76 @@
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+int qed_selftest_memory(struct qed_dev *cdev)
+{
+ int rc = 0, i;
+
+ for_each_hwfn(cdev, i) {
+ rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_selftest_interrupt(struct qed_dev *cdev)
+{
+ int rc = 0, i;
+
+ for_each_hwfn(cdev, i) {
+ rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_selftest_register(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0, i;
+
+ /* although performed by MCP, this test is per engine */
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int qed_selftest_clock(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0, i;
+
+ /* although performed by MCP, this test is per engine */
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
new file mode 100644
index 000000000..50eb0b499
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -0,0 +1,40 @@
+#ifndef _QED_SELFTEST_API_H
+#define _QED_SELFTEST_API_H
+#include <linux/types.h>
+
+/**
+ * @brief qed_selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_memory(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_interrupt(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_register(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_clock(struct qed_dev *cdev);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index d39f914b6..ea4e9ce53 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -52,6 +52,7 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
union ramrod_data {
struct pf_start_ramrod_data pf_start;
+ struct pf_update_ramrod_data pf_update;
struct rx_queue_start_ramrod_data rx_queue_start;
struct rx_queue_update_ramrod_data rx_queue_update;
struct rx_queue_stop_ramrod_data rx_queue_stop;
@@ -61,6 +62,9 @@ union ramrod_data {
struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update;
struct vport_filter_update_ramrod_data vport_filter_update;
+
+ struct vf_start_ramrod_data vf_start;
+ struct vf_stop_ramrod_data vf_stop;
};
#define EQ_MAX_CREDIT 0xffffffff
@@ -338,13 +342,29 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
+ * @param p_tunn
* @param mode
+ * @param allow_npar_tx_switch
*
* @return int
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum qed_mf_mode mode);
+ struct qed_tunn_start_params *p_tunn,
+ enum qed_mf_mode mode, bool allow_npar_tx_switch);
+
+/**
+ * @brief qed_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
/**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod
@@ -362,4 +382,18 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_tunn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+/**
+ * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 1c06c37d4..67f6ce3c8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -15,11 +15,13 @@
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
@@ -87,8 +89,218 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
return 0;
}
+static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+{
+ switch (type) {
+ case QED_TUNN_CLSS_MAC_VLAN:
+ return TUNNEL_CLSS_MAC_VLAN;
+ case QED_TUNN_CLSS_MAC_VNI:
+ return TUNNEL_CLSS_MAC_VNI;
+ case QED_TUNN_CLSS_INNER_MAC_VLAN:
+ return TUNNEL_CLSS_INNER_MAC_VLAN;
+ case QED_TUNN_CLSS_INNER_MAC_VNI:
+ return TUNNEL_CLSS_INNER_MAC_VNI;
+ default:
+ return TUNNEL_CLSS_MAC_VLAN;
+ }
+}
+
+static void
+qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
+ unsigned long update_mask = p_src->tunn_mode_update_mask;
+ unsigned long tunn_mode = p_src->tunn_mode;
+ unsigned long new_tunn_mode = 0;
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+ }
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+ }
+
+ p_src->tunn_mode = new_tunn_mode;
+}
+
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ unsigned long tunn_mode = p_src->tunn_mode;
+ enum tunnel_clss type;
+
+ qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
+ p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
+ p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+ p_tunn_cfg->tunnel_clss_vxlan = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+ p_tunn_cfg->tunnel_clss_l2gre = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+ p_tunn_cfg->tunnel_clss_ipgre = type;
+
+ if (p_src->update_vxlan_udp_port) {
+ p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+ p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2gre = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgre = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_vxlan = 1;
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2geneve = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+ p_tunn_cfg->tunnel_clss_l2geneve = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+ p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ unsigned long tunn_mode)
+{
+ u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+ u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ l2gre_enable = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ ipgre_enable = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ vxlan_enable = 1;
+
+ qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+ qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ l2geneve_enable = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ ipgeneve_enable = 1;
+
+ qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+ ipgeneve_enable);
+}
+
+static void
+qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_start_params *p_src,
+ struct pf_start_tunnel_config *p_tunn_cfg)
+{
+ unsigned long tunn_mode;
+ enum tunnel_clss type;
+
+ if (!p_src)
+ return;
+
+ tunn_mode = p_src->tunn_mode;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+ p_tunn_cfg->tunnel_clss_vxlan = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+ p_tunn_cfg->tunnel_clss_l2gre = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+ p_tunn_cfg->tunnel_clss_ipgre = type;
+
+ if (p_src->update_vxlan_udp_port) {
+ p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+ p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2gre = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgre = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_vxlan = 1;
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2geneve = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+ p_tunn_cfg->tunnel_clss_l2geneve = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+ p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum qed_mf_mode mode)
+ struct qed_tunn_start_params *p_tunn,
+ enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn);
@@ -143,16 +355,103 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
+ qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
+ &p_ramrod->tunnel_config);
p_hwfn->hw_info.personality = PERSONALITY_ETH;
+ if (IS_MF_SI(p_hwfn))
+ p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8) p_iov->total_vfs;
+ }
+
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index,
p_ramrod->outer_tag);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ if (p_tunn) {
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->tunn_mode);
+ p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+ }
+
+ return rc;
+}
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+ &p_ent->ramrod.pf_update);
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+/* Set pf update ramrod command params */
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_tunn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
+ &p_ent->ramrod.pf_update.tunnel_config);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ if (p_tunn->update_vxlan_udp_port)
+ qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_udp_port);
+ if (p_tunn->update_geneve_udp_port)
+ qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_udp_port);
+
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
+ p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+
+ return rc;
+}
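+
+/* Editor's sketch (assumption, not part of this patch): enabling VXLAN and
+ * programming its UDP port through the update ramrod above. Bits set in
+ * tunn_mode_update_mask select which tunnel modes may change; the matching
+ * bits in tunn_mode carry the new values. example_enable_vxlan() is a
+ * hypothetical helper.
+ */
+#if 0 /* example only */
+static int example_enable_vxlan(struct qed_hwfn *p_hwfn, u16 udp_port)
+{
+ struct qed_tunn_update_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_vxlan_udp_port = 1;
+ tunn_params.vxlan_udp_port = udp_port;
+ __set_bit(QED_MODE_VXLAN_TUNN, &tunn_params.tunn_mode_update_mask);
+ __set_bit(QED_MODE_VXLAN_TUNN, &tunn_params.tunn_mode);
+
+ return qed_sp_pf_update_tunn_cfg(p_hwfn, &tunn_params,
+ QED_SPQ_MODE_EBLOCK, NULL);
+}
+#endif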
+
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
@@ -173,3 +472,24 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 89469d5aa..03601dfc0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -27,6 +27,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
/***************************************************************************
* Structures & Definitions
@@ -212,19 +213,15 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
- /* validate producer is up to-date */
- rmb();
-
db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
- /* do not reorder */
- barrier();
+ /* make sure the SPQE is updated before the doorbell */
+ wmb();
DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure doorbell is rung */
- mmiowb();
+ wmb();
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
@@ -242,10 +239,17 @@ static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
- DP_NOTICE(p_hwfn,
- "Unknown Async completion for protocol: %d\n",
- p_eqe->protocol_id);
- return -EINVAL;
+ switch (p_eqe->protocol_id) {
+ case PROTOCOLID_COMMON:
+ return qed_sriov_eqe_event(p_hwfn,
+ p_eqe->opcode,
+ p_eqe->echo, &p_eqe->data);
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return -EINVAL;
+ }
}
/***************************************************************************
@@ -379,6 +383,9 @@ static int qed_cqe_completion(
struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol)
{
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
/* @@@tmp - it's possible we'll eventually want to handle some
* actual commands that can arrive here, but for now this is only
* used to complete the ramrod using the echo value on the cqe
@@ -603,7 +610,9 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
*p_en2 = *p_ent;
- kfree(p_ent);
+ /* EBLOCK responsible to free the allocated p_ent */
+ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+ kfree(p_ent);
p_ent = p_en2;
}
@@ -738,6 +747,15 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
* Thus, after gaining the answer perform the cleanup here.
*/
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ /* This is an allocated p_ent which does not need to
+ * return to pool.
+ */
+ kfree(p_ent);
+ return rc;
+ }
+
if (rc)
goto spq_post_fail2;
@@ -791,13 +809,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
* in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
*/
- bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+ __set_bit(pos, p_spq->p_comp_bitmap);
while (test_bit(p_spq->comp_bitmap_idx,
p_spq->p_comp_bitmap)) {
- bitmap_clear(p_spq->p_comp_bitmap,
- p_spq->comp_bitmap_idx,
- SPQ_RING_SIZE);
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
p_spq->comp_bitmap_idx++;
qed_chain_return_produced(&p_spq->chain);
}
@@ -833,8 +850,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
- if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
- /* EBLOCK is responsible for freeing its own entry */
+ if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+ (found->queue == &p_spq->unlimited_pending))
+ /* EBLOCK is responsible for returning its own entry into the
+ * free list, unless it originally added the entry into the
+ * unlimited pending list.
+ */
qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
new file mode 100644
index 000000000..c325ee857
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -0,0 +1,3613 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/qed/qed_iov_if.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+/* IOV ramrods */
+static int qed_sp_vf_start(struct qed_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid)
+{
+ struct vf_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_start;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = cpu_to_le16(opaque_vfid);
+
+ p_ramrod->personality = PERSONALITY_ETH;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid)
+{
+ struct vf_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_STOP,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_stop;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only)
+{
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+ return false;
+ }
+
+ if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
+ (rel_vf_id < 0))
+ return false;
+
+ if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+ b_enabled_only)
+ return false;
+
+ return true;
+}
+
+static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct qed_vf_info *vf = NULL;
+
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+ return NULL;
+ }
+
+ if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+ vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+ else
+ DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
+ relative_vf_id);
+
+ return vf;
+}
+
+int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+ int vfid, struct qed_ptt *p_ptt)
+{
+ struct qed_bulletin_content *p_bulletin;
+ int crc_size = sizeof(p_bulletin->crc);
+ struct qed_dmae_params params;
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf)
+ return -EINVAL;
+
+ if (!p_vf->vf_bulletin)
+ return -EINVAL;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ /* Increment bulletin board version and compute crc */
+ p_bulletin->version++;
+ p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+ p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+ /* propagate bulletin board via dmae to vm memory */
+ memset(&params, 0, sizeof(params));
+ params.flags = QED_DMAE_FLAG_VF_DST;
+ params.dst_vfid = p_vf->abs_vf_id;
+ return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+ p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+ &params);
+}
+
+static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
+{
+ struct qed_hw_sriov_info *iov = cdev->p_iov_info;
+ int pos = iov->pos;
+
+ DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
+ pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
+
+ pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+ if (iov->num_vfs) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
+ iov->num_vfs = 0;
+ }
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+ pci_read_config_dword(cdev->pdev,
+ pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+ pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+ pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+ iov->nres,
+ iov->cap,
+ iov->ctrl,
+ iov->total_vfs,
+ iov->initial_vfs,
+ iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+ /* Some sanity checks */
+ if (iov->num_vfs > NUM_OF_VFS(cdev) ||
+ iov->total_vfs > NUM_OF_VFS(cdev)) {
+ /* This can happen only due to a bug. In this case we set
+ * num_vfs to zero to avoid memory corruption in the code that
+ * assumes the max number of VFs.
+ */
+ DP_NOTICE(cdev,
+ "IOV: Unexpected number of vfs set: %d; setting num_vfs to zero\n",
+ iov->num_vfs);
+
+ iov->num_vfs = 0;
+ iov->total_vfs = 0;
+ }
+
+ return 0;
+}
+
+static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_igu_block *p_sb;
+ u16 sb_id;
+ u32 val;
+
+ if (!p_hwfn->hw_info.p_igu_info) {
+ DP_ERR(p_hwfn,
+ "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
+ return;
+ }
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ sb_id++) {
+ p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+ if ((p_sb->status & QED_IGU_STATUS_FREE) &&
+ !(p_sb->status & QED_IGU_STATUS_PF)) {
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
+ }
+ }
+}
+
+static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ struct qed_bulletin_content *p_bulletin_virt;
+ dma_addr_t req_p, rply_p, bulletin_p;
+ union pfvf_tlvs *p_reply_virt_addr;
+ union vfpf_tlvs *p_req_virt_addr;
+ u8 idx = 0;
+
+ memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+ p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+ req_p = p_iov_info->mbx_msg_phys_addr;
+ p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+ rply_p = p_iov_info->mbx_reply_phys_addr;
+ p_bulletin_virt = p_iov_info->p_bulletins;
+ bulletin_p = p_iov_info->bulletins_phys;
+ if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+ DP_ERR(p_hwfn,
+ "qed_iov_setup_vfdb called without allocating mem first\n");
+ return;
+ }
+
+ for (idx = 0; idx < p_iov->total_vfs; idx++) {
+ struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
+ u32 concrete;
+
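+ /* Each VF owns one slot in the shared request/reply mailbox arrays */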
+ vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+ vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+ vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+ vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+ vf->state = VF_STOPPED;
+ vf->b_init = false;
+
+ vf->bulletin.phys = idx *
+ sizeof(struct qed_bulletin_content) +
+ bulletin_p;
+ vf->bulletin.p_virt = p_bulletin_virt + idx;
+ vf->bulletin.size = sizeof(struct qed_bulletin_content);
+
+ vf->relative_vf_id = idx;
+ vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
+ concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+ vf->concrete_fid = concrete;
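+ /* Compose the VF's opaque fid from the PF fid (low byte) and the
+ * absolute VF id (high byte).
+ */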
+ vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+ (vf->abs_vf_id << 8);
+ vf->vport_id = idx + 1;
+ }
+}
+
+static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ void **p_v_addr;
+ u16 num_vfs = 0;
+
+ num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+
+ /* Allocate PF Mailbox buffer (per-VF) */
+ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_msg_size,
+ &p_iov_info->mbx_msg_phys_addr,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ /* Allocate PF Mailbox Reply buffer (per-VF) */
+ p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_reply_size,
+ &p_iov_info->mbx_reply_phys_addr,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
+ num_vfs;
+ p_v_addr = &p_iov_info->p_bulletins;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->bulletins_size,
+ &p_iov_info->bulletins_phys,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
+ p_iov_info->mbx_msg_virt_addr,
+ (u64) p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_reply_virt_addr,
+ (u64) p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+
+ return 0;
+}
+
+static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+ if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_msg_size,
+ p_iov_info->mbx_msg_virt_addr,
+ p_iov_info->mbx_msg_phys_addr);
+
+ if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_reply_size,
+ p_iov_info->mbx_reply_virt_addr,
+ p_iov_info->mbx_reply_phys_addr);
+
+ if (p_iov_info->p_bulletins)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->bulletins_size,
+ p_iov_info->p_bulletins,
+ p_iov_info->bulletins_phys);
+}
+
+int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_sriov;
+
+ if (!IS_PF_SRIOV(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No SR-IOV - no need for IOV db\n");
+ return 0;
+ }
+
+ p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
+ if (!p_sriov) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ return -ENOMEM;
+ }
+
+ p_hwfn->pf_iov_info = p_sriov;
+
+ return qed_iov_allocate_vfdb(p_hwfn);
+}
+
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return;
+
+ qed_iov_setup_vfdb(p_hwfn);
+ qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
+}
+
+void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+ if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
+ qed_iov_free_vfdb(p_hwfn);
+ kfree(p_hwfn->pf_iov_info);
+ }
+}
+
+void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+ kfree(cdev->p_iov_info);
+ cdev->p_iov_info = NULL;
+}
+
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int pos;
+ int rc;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
+ /* Learn the PCI configuration */
+ pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
+ PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
+ return 0;
+ }
+
+ /* Allocate a new struct for IOV information */
+ cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
+ if (!cdev->p_iov_info) {
+ DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
+ return -ENOMEM;
+ }
+ cdev->p_iov_info->pos = pos;
+
+ rc = qed_iov_pci_cfg_info(cdev);
+ if (rc)
+ return rc;
+
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
+ * In case the capability is published but there are no VFs, simply
+ * de-allocate the struct.
+ */
+ if (!cdev->p_iov_info->total_vfs) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "IOV capabilities, but no VFs are published\n");
+ kfree(cdev->p_iov_info);
+ cdev->p_iov_info = NULL;
+ return 0;
+ }
+
+ /* Calculate the first VF index - this is a bit tricky; Basically,
+ * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
+ * after the first engine's VFs.
+ */
+ cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+ if (QED_PATH_ID(p_hwfn))
+ cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
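+
+ /* For example, assuming offset 16: PF0 on the first engine gets
+ * first_vf_in_pf = 16 + 0 - 16 = 0, while a PF on the second engine
+ * additionally skips the first engine's MAX_NUM_VFS_BB VFs.
+ */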
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "First VF in hwfn 0x%08x\n",
+ cdev->p_iov_info->first_vf_in_pf);
+
+ return 0;
+}
+
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+{
+ /* Check PF supports sriov */
+ if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
+ !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return false;
+
+ /* Check VF validity */
+ if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
+ return false;
+
+ return true;
+}
+
+static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
+ u16 rel_vf_id, u8 to_disable)
+{
+ struct qed_vf_info *vf;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf)
+ continue;
+
+ vf->to_disable = to_disable;
+ }
+}
+
+void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+{
+ u16 i;
+
+ if (!IS_QED_SRIOV(cdev))
+ return;
+
+ for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
+ qed_iov_set_vf_to_disable(cdev, i, to_disable);
+}
+
+static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_vfid)
+{
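+ /* Each WAS_ERROR register tracks 32 VFs: (abs_vfid >> 5) selects
+ * the register and bit (abs_vfid & 0x1f) is this VF's flag.
+ */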
+ qed_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
+ 1 << (abs_vfid & 0x1f));
+}
+
+static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+ int i;
+
+ /* Set VF masks and configuration - pretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf->num_sbs; i++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, true);
+}
+
+static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, bool enable)
+{
+ u32 igu_vf_conf;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+
+ if (enable)
+ igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
+ else
+ igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+}
+
+static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+ int rc;
+
+ if (vf->to_disable)
+ return 0;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Enable internal access for vf %x [abs %x]\n",
+ vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
+
+ qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
+
+ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
+ if (rc)
+ return rc;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+ STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+ qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+ p_hwfn->hw_info.hw_mode);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ if (vf->state != VF_STOPPED) {
+ DP_NOTICE(p_hwfn, "VF[%02x] is already started\n",
+ vf->abs_vf_id);
+ return -EINVAL;
+ }
+
+ /* Start VF */
+ rc = qed_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
+
+ vf->state = VF_FREE;
+
+ return rc;
+}
+
+/**
+ * @brief qed_iov_config_perm_table - configure the permission
+ * zone table.
+ * In E4, queue zone permission table size is 320x9. There
+ * are 320 VF queues for a single-engine device (256 for a dual-
+ * engine device), and each entry has the following format:
+ * {Valid, VF[7:0]}
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ * @param enable
+ */
+static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u8 enable)
+{
+ u32 reg_addr, val;
+ u16 qzone_id = 0;
+ int qid;
+
+ for (qid = 0; qid < vf->num_rxqs; qid++) {
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+ &qzone_id);
+
+ reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
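+ /* Entry format is {Valid-bit[8], VF[7:0]}; zero revokes the entry */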
+ val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ qed_wr(p_hwfn, p_ptt, reg_addr, val);
+ }
+}
+
+static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ /* Reset vf in IGU - interrupts are still disabled */
+ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
+
+ /* Permission Table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
+}
+
+static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u16 num_rx_queues)
+{
+ struct qed_igu_block *igu_blocks;
+ int qid = 0, igu_id = 0;
+ u32 val = 0;
+
+ igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
+
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
+ num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
+ p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+
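+ /* Prepare a template IGU mapping line: owned by this VF, valid,
+ * and not marked as a PF line.
+ */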
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
+ while ((qid < num_rx_queues) &&
+ (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
+ if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
+ struct cau_sb_entry sb_entry;
+
+ vf->igu_sbs[qid] = (u16)igu_id;
+ igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
+ val);
+
+ /* Configure igu sb in CAU which were marked valid */
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id,
+ vf->abs_vf_id, 1);
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_id * sizeof(u64), 2, 0);
+ qid++;
+ }
+ igu_id++;
+ }
+
+ vf->num_sbs = (u8) num_rx_queues;
+
+ return vf->num_sbs;
+}
+
+static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ int idx, igu_id;
+ u32 addr, val;
+
+ /* Invalidate igu CAM lines and mark them as free */
+ for (idx = 0; idx < vf->num_sbs; idx++) {
+ igu_id = vf->igu_sbs[idx];
+ addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+
+ p_info->igu_map.igu_blocks[igu_id].status |=
+ QED_IGU_STATUS_FREE;
+
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ }
+
+ vf->num_sbs = 0;
+}
+
+static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rel_vf_id, u16 num_rx_queues)
+{
+ u8 num_of_vf_available_chains = 0;
+ struct qed_vf_info *vf = NULL;
+ int rc = 0;
+ u32 cids;
+ u8 i;
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf) {
+ DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vf->b_init) {
+ DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
+ return -EINVAL;
+ }
+
+ /* Limit number of queues according to number of CIDs */
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
+ vf->relative_vf_id, num_rx_queues, (u16) cids);
+ num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
+
+ num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
+ p_ptt,
+ vf,
+ num_rx_queues);
+ if (!num_of_vf_available_chains) {
+ DP_ERR(p_hwfn, "no available igu sbs\n");
+ return -ENOMEM;
+ }
+
+ /* Choose queue number and index ranges */
+ vf->num_rxqs = num_of_vf_available_chains;
+ vf->num_txqs = num_of_vf_available_chains;
+
+ for (i = 0; i < vf->num_rxqs; i++) {
+ u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
+ vf->igu_sbs[i]);
+
+ if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+ DP_NOTICE(p_hwfn,
+ "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
+ vf->relative_vf_id, queue_id);
+ return -EINVAL;
+ }
+
+ /* CIDs are per-VF, so no problem having them 0-based. */
+ vf->vf_queues[i].fw_rx_qid = queue_id;
+ vf->vf_queues[i].fw_tx_qid = queue_id;
+ vf->vf_queues[i].fw_cid = i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ }
+ rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+ if (!rc) {
+ vf->b_init = true;
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->p_iov_info->num_vfs++;
+ }
+
+ return rc;
+}
+
+static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 rel_vf_id)
+{
+ struct qed_mcp_link_capabilities caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ struct qed_vf_info *vf = NULL;
+ int rc = 0;
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!vf) {
+ DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vf->bulletin.p_virt)
+ memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
+
+ memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
+
+ /* Get the link configuration back in bulletin so
+ * that when VFs are re-enabled they get the actual
+ * link configuration.
+ */
+ memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
+ qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+ if (vf->state != VF_STOPPED) {
+ /* Stopping the VF */
+ rc = qed_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
+ rc);
+ return rc;
+ }
+
+ vf->state = VF_STOPPED;
+ }
+
+ /* Disabling interrupts and resetting the permission table were done
+ * during vf-close; however, we could get here without going through
+ * vf_close.
+ */
+ /* Disable Interrupts for VF */
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ vf->num_rxqs = 0;
+ vf->num_txqs = 0;
+ qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+ if (vf->b_init) {
+ vf->b_init = false;
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->p_iov_info->num_vfs--;
+ }
+
+ return 0;
+}
+
+static bool qed_iov_tlv_supported(u16 tlvtype)
+{
+ return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
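+/* Typical use: point *offset at the start of the reply buffer, chain
+ * qed_add_tlv() calls for each TLV to emit, and terminate the list with
+ * a CHANNEL_TLV_LIST_END entry, as qed_iov_prepare_resp() does below.
+ */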
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
+{
+ struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+ tl->type = type;
+ tl->length = length;
+
+ /* Offset should keep pointing to next TLV (the end of the last) */
+ *offset += length;
+
+ /* Return a pointer to the start of the added tlv */
+ return *offset - length;
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
+{
+ u16 i = 1, total_length = 0;
+ struct channel_tlv *tlv;
+
+ do {
+ tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+ /* output tlv */
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "TLV number %d: type %d, length %d\n",
+ i, tlv->type, tlv->length);
+
+ if (tlv->type == CHANNEL_TLV_LIST_END)
+ return;
+
+ /* Validate entry - protect against malicious VFs */
+ if (!tlv->length) {
+ DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
+ return;
+ }
+
+ total_length += tlv->length;
+
+ if (total_length >= sizeof(struct tlv_buffer_size)) {
+ DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
+ return;
+ }
+
+ i++;
+ } while (1);
+}
+
+static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ u16 length, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct qed_dmae_params params;
+ u8 eng_vf_id;
+
+ mbx->reply_virt->default_resp.hdr.status = status;
+
+ qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+ eng_vf_id = p_vf->abs_vf_id;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = QED_DMAE_FLAG_VF_DST;
+ params.dst_vfid = eng_vf_id;
+
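+ /* Copy the body of the reply first and its header last; the header
+ * carries the status, so the VF cannot observe a completed reply
+ * before the rest of the data has landed.
+ */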
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+ mbx->req_virt->first_tlv.reply_address +
+ sizeof(u64),
+ (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+ &params);
+
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
+
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+}
+
+static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
+ enum qed_iov_vport_update_flag flag)
+{
+ switch (flag) {
+ case QED_IOV_VP_UPDATE_ACTIVATE:
+ return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ case QED_IOV_VP_UPDATE_VLAN_STRIP:
+ return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+ case QED_IOV_VP_UPDATE_TX_SWITCH:
+ return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ case QED_IOV_VP_UPDATE_MCAST:
+ return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+ case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ case QED_IOV_VP_UPDATE_RSS:
+ return CHANNEL_TLV_VPORT_UPDATE_RSS;
+ case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ case QED_IOV_VP_UPDATE_SGE_TPA:
+ return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+ default:
+ return 0;
+ }
+}
+
+static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_iov_vf_mbx *p_mbx,
+ u8 status,
+ u16 tlvs_mask, u16 tlvs_accepted)
+{
+ struct pfvf_def_resp_tlv *resp;
+ u16 size, total_len, i;
+
+ memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+ p_mbx->offset = (u8 *)p_mbx->reply_virt;
+ size = sizeof(struct pfvf_def_resp_tlv);
+ total_len = size;
+
+ qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+ /* Prepare response for all extended tlvs if they are found by PF */
+ for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
+ if (!(tlvs_mask & (1 << i)))
+ continue;
+
+ resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
+ qed_iov_vport_to_tlv(p_hwfn, i), size);
+
+ if (tlvs_accepted & (1 << i))
+ resp->hdr.status = status;
+ else
+ resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - vport_update response: TLV %d, status %02x\n",
+ p_vf->relative_vf_id,
+ qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+
+ total_len += size;
+ }
+
+ qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ return total_len;
+}
+
+static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf_info,
+ u16 type, u16 length, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ qed_add_tlv(p_hwfn, &mbx->offset, type, length);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+}
+
+struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct qed_vf_info *vf = NULL;
+
+ vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+ if (!vf)
+ return NULL;
+
+ return &vf->p_vf_info;
+}
+
+void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
+
+ if (!vf_info)
+ return;
+
+ /* Clear the VF mac */
+ memset(vf_info->mac, 0, ETH_ALEN);
+}
+
+static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u32 i;
+
+ p_vf->vf_bulletin = 0;
+ p_vf->vport_instance = 0;
+ p_vf->num_mac_filters = 0;
+ p_vf->num_vlan_filters = 0;
+ p_vf->configured_features = 0;
+
+ /* If VF previously requested less resources, go back to default */
+ p_vf->num_rxqs = p_vf->num_sbs;
+ p_vf->num_txqs = p_vf->num_sbs;
+
+ p_vf->num_active_rxqs = 0;
+
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
+ p_vf->vf_queues[i].rxq_active = 0;
+
+ memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
+}
+
+static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+ u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+ struct pf_vf_resc *resc = &resp->resc;
+
+ /* Validate FW compatibility */
+ if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
+ req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
+ req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
+ req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an incompatible driver [VF needs FW %02x:%02x:%02x:%02x but Hypervisor is using %02x:%02x:%02x:%02x]\n",
+ vf->abs_vf_id,
+ req->vfdev_info.fw_major,
+ req->vfdev_info.fw_minor,
+ req->vfdev_info.fw_revision,
+ req->vfdev_info.fw_engineering,
+ FW_MAJOR_VERSION,
+ FW_MINOR_VERSION,
+ FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
+ vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ /* On 100g PFs, prevent old VFs from loading */
+ if ((p_hwfn->cdev->num_hwfns > 1) &&
+ !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an old driver that doesn't support 100g\n",
+ vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ memset(resp, 0, sizeof(*resp));
+
+ /* Fill in vf info stuff */
+ vf->opaque_fid = req->vfdev_info.opaque_fid;
+ vf->num_mac_filters = 1;
+ vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+
+ vf->vf_bulletin = req->bulletin_addr;
+ vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+ vf->bulletin.size : req->bulletin_size;
+
+ /* fill in pfdev info */
+ pfdev_info->chip_num = p_hwfn->cdev->chip_num;
+ pfdev_info->db_size = 0;
+ pfdev_info->indices_per_sb = PIS_PER_SB;
+
+ pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+
+ pfdev_info->stats_info.mstats.address =
+ PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.mstats.len =
+ sizeof(struct eth_mstorm_per_queue_stat);
+
+ pfdev_info->stats_info.ustats.address =
+ PXP_VF_BAR0_START_USDM_ZONE_B +
+ offsetof(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.ustats.len =
+ sizeof(struct eth_ustorm_per_queue_stat);
+
+ pfdev_info->stats_info.pstats.address =
+ PXP_VF_BAR0_START_PSDM_ZONE_B +
+ offsetof(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
+ pfdev_info->stats_info.pstats.len =
+ sizeof(struct eth_pstorm_per_queue_stat);
+
+ pfdev_info->stats_info.tstats.address = 0;
+ pfdev_info->stats_info.tstats.len = 0;
+
+ memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+
+ pfdev_info->fw_major = FW_MAJOR_VERSION;
+ pfdev_info->fw_minor = FW_MINOR_VERSION;
+ pfdev_info->fw_rev = FW_REVISION_VERSION;
+ pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+ pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
+ qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
+
+ pfdev_info->dev_type = p_hwfn->cdev->type;
+ pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
+
+ resc->num_rxqs = vf->num_rxqs;
+ resc->num_txqs = vf->num_txqs;
+ resc->num_sbs = vf->num_sbs;
+ for (i = 0; i < resc->num_sbs; i++) {
+ resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
+ resc->hw_sbs[i].sb_qid = 0;
+ }
+
+ for (i = 0; i < resc->num_rxqs; i++) {
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&resc->hw_qid[i]);
+ resc->cid[i] = vf->vf_queues[i].fw_cid;
+ }
+
+ resc->num_mac_filters = min_t(u8, vf->num_mac_filters,
+ req->resc_request.num_mac_filters);
+ resc->num_vlan_filters = min_t(u8, vf->num_vlan_filters,
+ req->resc_request.num_vlan_filters);
+
+ /* This isn't really required as VF isn't limited, but some VFs might
+ * actually test this value, so need to provide it.
+ */
+ resc->num_mc_filters = req->resc_request.num_mc_filters;
+
+ /* Fill agreed size of bulletin board in response */
+ resp->bulletin_size = vf->bulletin.size;
+ qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
+ vf->abs_vf_id,
+ resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size,
+ resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.capabilities,
+ resc->num_rxqs,
+ resc->num_txqs,
+ resc->num_sbs,
+ resc->num_mac_filters,
+ resc->num_vlan_filters);
+ vf->state = VF_ACQUIRED;
+
+ /* Prepare Response */
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+ sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
+}
+
+static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, bool val)
+{
+ struct qed_sp_vport_update_params params;
+ int rc;
+
+ if (val == p_vf->spoof_chk) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk value[%d] is already configured\n", val);
+ return 0;
+ }
+
+ memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.update_anti_spoofing_en_flg = 1;
+ params.anti_spoofing_en = val;
+
+ rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+ if (!rc) {
+ p_vf->spoof_chk = val;
+ p_vf->req_spoofchk_val = p_vf->spoof_chk;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk val[%d] configured\n", val);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+ val, p_vf->relative_vf_id);
+ }
+
+ return rc;
+}
+
+static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_filter_ucast filter;
+ int rc = 0;
+ int i;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.opcode = QED_FILTER_ADD;
+
+ /* Reconfigure vlans */
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (!p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ filter.type = QED_FILTER_VLAN;
+ filter.vlan = p_vf->shadow_config.vlans[i].vid;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ rc = qed_sp_eth_filter_ucast(p_hwfn,
+ p_vf->opaque_fid,
+ &filter,
+ QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to configure VLAN [%04x] to VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int
+qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u64 events)
+{
+ int rc = 0;
+
+ if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+ rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+ return rc;
+}
+
+static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u64 events)
+{
+ int rc = 0;
+ struct qed_filter_ucast filter;
+
+ if (!p_vf->vport_instance)
+ return -EINVAL;
+
+ if (events & (1 << MAC_ADDR_FORCED)) {
+ /* Since there's no way [currently] of removing the MAC,
+ * we can always assume this means we need to force it.
+ */
+ memset(&filter, 0, sizeof(filter));
+ filter.type = QED_FILTER_MAC;
+ filter.opcode = QED_FILTER_REPLACE;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure MAC for VF\n");
+ return rc;
+ }
+
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ }
+
+ if (events & (1 << VLAN_ADDR_FORCED)) {
+ struct qed_sp_vport_update_params vport_update;
+ u8 removal;
+ int i;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.type = QED_FILTER_VLAN;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.vlan = p_vf->bulletin.p_virt->pvid;
+ filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
+ QED_FILTER_FLUSH;
+
+ /* Send the ramrod */
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure VLAN for VF\n");
+ return rc;
+ }
+
+ /* Update the default-vlan & silent vlan stripping */
+ memset(&vport_update, 0, sizeof(vport_update));
+ vport_update.opaque_fid = p_vf->opaque_fid;
+ vport_update.vport_id = p_vf->vport_id;
+ vport_update.update_default_vlan_enable_flg = 1;
+ vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+ vport_update.update_default_vlan_flg = 1;
+ vport_update.default_vlan = filter.vlan;
+
+ vport_update.update_inner_vlan_removal_flg = 1;
+ removal = filter.vlan ? 1
+ : p_vf->shadow_config.inner_vlan_removal;
+ vport_update.inner_vlan_removal_flg = removal;
+ vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+ rc = qed_sp_vport_update(p_hwfn,
+ &vport_update,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure VF vport for vlan\n");
+ return rc;
+ }
+
+ /* Update all the Rx queues */
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+ u16 qid;
+
+ if (!p_vf->vf_queues[i].rxq_active)
+ continue;
+
+ qid = p_vf->vf_queues[i].fw_rx_qid;
+
+ rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
+ 1, 0, 1,
+ QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send Rx update fo queue[0x%04x]\n",
+ qid);
+ return rc;
+ }
+ }
+
+ if (filter.vlan)
+ p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+ else
+ p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ }
+
+ /* If forced features are terminated, we need to configure the shadow
+ * configuration back again.
+ */
+ if (events)
+ qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+ return rc;
+}
+
+static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_sp_vport_start_params params = { 0 };
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_vport_start_tlv *start;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct qed_vf_info *vf_info;
+ u64 *p_bitmap;
+ int sb_id;
+ int rc;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to get VF info, invalid vfid [%d]\n",
+ vf->relative_vf_id);
+ return;
+ }
+
+ vf->state = VF_ENABLED;
+ start = &mbx->req_virt->start_vport;
+
+ /* Initialize Status block in CAU */
+ for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+ if (!start->sb_addr[sb_id]) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] did not fill the address of SB %d\n",
+ vf->relative_vf_id, sb_id);
+ break;
+ }
+
+ qed_int_cau_conf_sb(p_hwfn, p_ptt,
+ start->sb_addr[sb_id],
+ vf->igu_sbs[sb_id],
+ vf->abs_vf_id, 1);
+ }
+ qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+ vf->mtu = start->mtu;
+ vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+ /* Take into consideration configuration forced by hypervisor;
+ * if none is configured, use the supplied VF values [for old
+ * vfs that would still be fine, since they passed '0' as padding].
+ */
+ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+ if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ u8 vf_req = start->only_untagged;
+
+ vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+ *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+ }
+
+ params.tpa_mode = start->tpa_mode;
+ params.remove_inner_vlan = start->inner_vlan_removal;
+ params.tx_switching = true;
+
+ params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+ params.drop_ttl0 = false;
+ params.concrete_fid = vf->concrete_fid;
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+ params.mtu = vf->mtu;
+
+ rc = qed_sp_eth_vport_start(p_hwfn, &params);
+ if (rc != 0) {
+ DP_ERR(p_hwfn,
+ "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vport_instance++;
+
+ /* Force configuration if needed on the newly opened vport */
+ qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+
+ __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+ }
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc;
+
+ vf->vport_instance--;
+ vf->spoof_chk = false;
+
+ rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ /* Forget the configuration on the vport */
+ vf->configured_features = 0;
+ memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
+#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
+ (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
+
+static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ struct vfpf_start_rxq_tlv *req;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
+ sizeof(*p_tlv));
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if (status == PFVF_STATUS_SUCCESS) {
+ u16 hw_qid = 0;
+
+ req = &mbx->req_virt->start_rxq;
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[req->rx_qid].fw_rx_qid,
+ &hw_qid);
+
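+ /* Point the VF at its rx producers inside the MSTORM queue zone */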
+ p_tlv->offset = MSTORM_QZONE_START(p_hwfn->cdev) +
+ hw_qid * MSTORM_QZONE_SIZE +
+ offsetof(struct mstorm_eth_queue_zone,
+ rx_producers);
+ }
+
+ qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_queue_start_common_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_start_rxq_tlv *req;
+ int rc;
+
+ memset(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_rxq;
+ params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
+ params.vport_id = vf->vport_id;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
+ vf->vf_queues[req->rx_qid].fw_cid,
+ &params,
+ vf->abs_vf_id + 0x10,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr, req->cqe_pbl_size);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vf_queues[req->rx_qid].rxq_active = true;
+ vf->num_active_rxqs++;
+ }
+
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+}
+
+static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_queue_start_common_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ union qed_qm_pq_params pq_params;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_start_txq_tlv *req;
+ int rc;
+
+ /* Prepare the parameters which would choose the right PQ */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.eth.is_vf = 1;
+ pq_params.eth.vf_id = vf->relative_vf_id;
+
+ memset(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_txq;
+ params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
+ params.vport_id = vf->vport_id;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+ vf->opaque_fid,
+ vf->vf_queues[req->tx_qid].fw_cid,
+ &params,
+ vf->abs_vf_id + 0x10,
+ req->pbl_addr,
+ req->pbl_size, &pq_params);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+ else
+ vf->vf_queues[req->tx_qid].txq_active = true;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
+ length, status);
+}
+
+static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+{
+ int rc = 0;
+ int qid;
+
+ if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+ return -EINVAL;
+
+ for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
+ if (vf->vf_queues[qid].rxq_active) {
+ rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+ vf->vf_queues[qid].
+ fw_rx_qid, false,
+ cqe_completion);
+
+ if (rc)
+ return rc;
+ }
+ vf->vf_queues[qid].rxq_active = false;
+ vf->num_active_rxqs--;
+ }
+
+ return rc;
+}
+
+static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+{
+ int rc = 0;
+ int qid;
+
+ if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+ return -EINVAL;
+
+ for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
+ if (vf->vf_queues[qid].txq_active) {
+ rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+ vf->vf_queues[qid].
+ fw_tx_qid);
+
+ if (rc)
+ return rc;
+ }
+ vf->vf_queues[qid].txq_active = false;
+ }
+ return rc;
+}
+
+static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_rxqs_tlv *req;
+ int rc;
+
+ /* We allow starting from qid != 0; in this case we need to make
+ * sure that qid + num_qs doesn't exceed the actual number of
+ * queues that exist.
+ */
+ req = &mbx->req_virt->stop_rxqs;
+ rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ req->num_rxqs, req->cqe_completion);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_txqs_tlv *req;
+ int rc;
+
+ /* We allow starting from qid != 0; in this case we need to make
+ * sure that qid + num_qs doesn't exceed the actual number of
+ * queues that exist.
+ */
+ req = &mbx->req_virt->stop_txqs;
+ rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_update_rxq_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u8 complete_event_flg;
+ u8 complete_cqe_flg;
+ u16 qid;
+ int rc;
+ u8 i;
+
+ req = &mbx->req_virt->update_rxq;
+ complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+ complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+ for (i = 0; i < req->num_rxqs; i++) {
+ qid = req->rx_qid + i;
+
+ if (!vf->vf_queues[qid].rxq_active) {
+ DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n",
+ qid);
+ status = PFVF_STATUS_FAILURE;
+ break;
+ }
+
+ rc = qed_sp_eth_rx_queues_update(p_hwfn,
+ vf->vf_queues[qid].fw_rx_qid,
+ 1,
+ complete_cqe_flg,
+ complete_event_flg,
+ QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ break;
+ }
+ }
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+ length, status);
+}
+
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type)
+{
+ struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+ int len = 0;
+
+ do {
+ if (!p_tlv->length) {
+ DP_NOTICE(p_hwfn, "Zero length TLV found\n");
+ return NULL;
+ }
+
+ if (p_tlv->type == req_type) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Extended tlv type %d, length %d found\n",
+ p_tlv->type, p_tlv->length);
+ return p_tlv;
+ }
+
+ len += p_tlv->length;
+ p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
+ if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+ DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
+ return NULL;
+ }
+ } while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+ return NULL;
+}
+
+static void
+qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+ p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_act_tlv)
+ return;
+
+ p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+ p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+ p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+ p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
+}
+
+static void
+qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_vf_info *p_vf,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+ p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_vlan_tlv)
+ return;
+
+ p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+ /* Ignore the VF request if we're forcing a vlan */
+ if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ p_data->update_inner_vlan_removal_flg = 1;
+ p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+ }
+
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+ p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ tlv);
+ if (!p_tx_switch_tlv)
+ return;
+
+ p_data->update_tx_switching_flg = 1;
+ p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
+}
+
+static void
+qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+
+ p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_mcast_tlv)
+ return;
+
+ p_data->update_approx_mcast_flg = 1;
+ memcpy(p_data->bins, p_mcast_tlv->bins,
+ sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
+}
+
+static void
+qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+
+ p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_tlv)
+ return;
+
+ p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+ p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+ p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+ p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
+}
+
+static void
+qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+
+ p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ tlv);
+ if (!p_accept_any_vlan)
+ return;
+
+ p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+ p_data->update_accept_any_vlan_flg =
+ p_accept_any_vlan->update_accept_any_vlan_flg;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+}
+
+static void
+qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_rss_params *p_rss,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+ u16 i, q_idx, max_q_idx;
+ u16 table_size;
+
+ p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_rss_tlv) {
+ p_data->rss_params = NULL;
+ return;
+ }
+
+ memset(p_rss, 0, sizeof(struct qed_rss_params));
+
+ p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CONFIG_FLAG);
+ p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CAPS_FLAG);
+ p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+ p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_KEY_FLAG);
+
+ p_rss->rss_enable = p_rss_tlv->rss_enable;
+ p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_caps = p_rss_tlv->rss_caps;
+ p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+ memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
+ sizeof(p_rss->rss_ind_table));
+ memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
+
+ table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
+ (1 << p_rss_tlv->rss_table_size_log));
+
+ max_q_idx = ARRAY_SIZE(vf->vf_queues);
+
+ for (i = 0; i < table_size; i++) {
+ u16 index = vf->vf_queues[0].fw_rx_qid;
+
+ q_idx = p_rss->rss_ind_table[i];
+ if (q_idx >= max_q_idx)
+ DP_NOTICE(p_hwfn,
+ "rss_ind_table[%d] = %d, rxq is out of range\n",
+ i, q_idx);
+ else if (!vf->vf_queues[q_idx].rxq_active)
+ DP_NOTICE(p_hwfn,
+ "rss_ind_table[%d] = %d, rxq is not active\n",
+ i, q_idx);
+ else
+ index = vf->vf_queues[q_idx].fw_rx_qid;
+ p_rss->rss_ind_table[i] = index;
+ }
+
+ p_data->rss_params = p_rss;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+}
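+
+/* Illustrative sketch, not part of the original patch: the loop above
+ * clamps the indirection table to its array size and translates each
+ * VF-relative queue id into a FW rx queue id, falling back to queue 0's
+ * id for out-of-range or inactive entries. A minimal standalone version,
+ * assuming hypothetical fw_qid[] and active[] lookups:
+ */
+#if 0
+static void sketch_remap_rss_table(u16 *tbl, u16 entries, const u16 *fw_qid,
+				   const bool *active, u16 max_q)
+{
+	u16 i;
+
+	for (i = 0; i < entries; i++) {
+		u16 q = tbl[i];
+
+		/* Keep valid active queues; otherwise fall back to queue 0 */
+		tbl[i] = (q < max_q && active[q]) ? fw_qid[q] : fw_qid[0];
+	}
+}
+#endif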
+
+static void
+qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_sge_tpa_params *p_sge_tpa,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+ p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (!p_sge_tpa_tlv) {
+ p_data->sge_tpa_params = NULL;
+ return;
+ }
+
+ memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
+
+ p_sge_tpa->update_tpa_en_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+ p_sge_tpa->update_tpa_param_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+ VFPF_UPDATE_TPA_PARAM_FLAG);
+
+ p_sge_tpa->tpa_ipv4_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+ p_sge_tpa->tpa_ipv6_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+ p_sge_tpa->tpa_pkt_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+ p_sge_tpa->tpa_hdr_data_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+ p_sge_tpa->tpa_gro_consistent_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+ p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+ p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+ p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+ p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+ p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+ p_data->sge_tpa_params = p_sge_tpa;
+
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
+}
+
+static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_sp_vport_update_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct qed_sge_tpa_params sge_tpa_params;
+ struct qed_rss_params rss_params;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u16 tlvs_mask = 0;
+ u16 length;
+ int rc;
+
+ memset(&params, 0, sizeof(params));
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.rss_params = NULL;
+
+ /* Search the TLV list for extended TLVs and update the values
+ * provided by the VF in struct qed_sp_vport_update_params.
+ */
+ qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+ qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
+ mbx, &tlvs_mask);
+ qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ &sge_tpa_params, mbx, &tlvs_mask);
+
+ /* Just log a message if no extended TLV was found in the buffer.
+ * Once the VF requests every vport-update ramrod feature as an
+ * extended TLV, a buffer without any extended TLV can instead be
+ * treated as an error in the response.
+ */
+ if (!tlvs_mask) {
+ DP_NOTICE(p_hwfn,
+ "No feature tlvs found for vport update\n");
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+ tlvs_mask, tlvs_mask);
+ qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static int qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int i;
+
+ if (p_params->type == QED_FILTER_MAC)
+ return 0;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == QED_FILTER_REMOVE) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ if (p_vf->shadow_config.vlans[i].used &&
+ p_vf->shadow_config.vlans[i].vid ==
+ p_params->vlan) {
+ p_vf->shadow_config.vlans[i].used = false;
+ break;
+ }
+ if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Tries to remove a non-existing vlan\n",
+ p_vf->relative_vf_id);
+ return -EINVAL;
+ }
+ } else if (p_params->opcode == QED_FILTER_REPLACE ||
+ p_params->opcode == QED_FILTER_FLUSH) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ p_vf->shadow_config.vlans[i].used = false;
+ }
+
+ /* In forced mode, we're willing to remove entries - but we don't add
+ * new ones.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ return 0;
+
+ if (p_params->opcode == QED_FILTER_ADD ||
+ p_params->opcode == QED_FILTER_REPLACE) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ p_vf->shadow_config.vlans[i].used = true;
+ p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+ break;
+ }
+
+ if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Tries to configure more than %d vlan filters\n",
+ p_vf->relative_vf_id,
+ QED_ETH_VF_NUM_VLAN_FILTERS + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+ int vfid, struct qed_filter_ucast *params)
+{
+ struct qed_public_vf_info *vf;
+
+ vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ /* No real decision to make; Store the configured MAC */
+ if (params->type == QED_FILTER_MAC ||
+ params->type == QED_FILTER_MAC_VLAN)
+ ether_addr_copy(vf->mac, params->mac);
+
+ return 0;
+}
+
+static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_ucast_filter_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct qed_filter_ucast params;
+ int rc;
+
+ /* Prepare the unicast filter params */
+ memset(&params, 0, sizeof(struct qed_filter_ucast));
+ req = &mbx->req_virt->ucast_filter;
+ params.opcode = (enum qed_filter_opcode)req->opcode;
+ params.type = (enum qed_filter_ucast_type)req->type;
+
+ params.is_rx_filter = 1;
+ params.is_tx_filter = 1;
+ params.vport_to_remove_from = vf->vport_id;
+ params.vport_to_add_to = vf->vport_id;
+ memcpy(params.mac, req->mac, ETH_ALEN);
+ params.vlan = req->vlan;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+ vf->abs_vf_id, params.opcode, params.type,
+ params.is_rx_filter ? "RX" : "",
+ params.is_tx_filter ? "TX" : "",
+ params.vport_to_add_to,
+ params.mac[0], params.mac[1],
+ params.mac[2], params.mac[3],
+ params.mac[4], params.mac[5], params.vlan);
+
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Update shadow copy of the VF configuration */
+ if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Determine if the unicast filtering is acceptable by PF */
+ if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ (params.type == QED_FILTER_VLAN ||
+ params.type == QED_FILTER_MAC_VLAN)) {
+ /* Once VLAN is forced or PVID is set, do not allow
+ * to add/replace any further VLANs.
+ */
+ if (params.opcode == QED_FILTER_ADD ||
+ params.opcode == QED_FILTER_REPLACE)
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ (params.type == QED_FILTER_MAC ||
+ params.type == QED_FILTER_MAC_VLAN)) {
+ if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
+ (params.opcode != QED_FILTER_ADD &&
+ params.opcode != QED_FILTER_REPLACE))
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+ QED_SPQ_MODE_CB, NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ int i;
+
+ /* Reset the SBs */
+ for (i = 0; i < vf->num_sbs; i++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, false);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_SUCCESS);
+}
+
+static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Disable Interrupts for VF */
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+
+ qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+ length, PFVF_STATUS_SUCCESS);
+}
+
+static int
+qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ int cnt;
+ u32 val;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+
+ for (cnt = 0; cnt < 50; cnt++) {
+ val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+ if (!val)
+ break;
+ msleep(20);
+ }
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn,
+ "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+ p_vf->abs_vf_id, val);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ int i, cnt;
+
+ /* Read initial consumers & producers */
+ for (i = 0; i < MAX_NUM_VOQS; i++) {
+ u32 prod;
+
+ cons[i] = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ prod = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+ i * 0x40);
+ distance[i] = prod - cons[i];
+ }
+
+ /* Wait for consumers to pass the producers */
+ i = 0;
+ for (cnt = 0; cnt < 50; cnt++) {
+ for (; i < MAX_NUM_VOQS; i++) {
+ u32 tmp;
+
+ tmp = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ if (distance[i] > tmp - cons[i])
+ break;
+ }
+
+ if (i == MAX_NUM_VOQS)
+ break;
+
+ msleep(20);
+ }
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+ p_vf->abs_vf_id, i);
+ return -EBUSY;
+ }
+
+ return 0;
+}
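+
+/* Illustrative sketch, not part of the original patch: the drain check
+ * above treats a VOQ as clean once its consumer has advanced by at least
+ * the initial producer-consumer distance. Computing (now - start) in
+ * unsigned arithmetic keeps the test correct even if the 32-bit HW
+ * counter wraps:
+ */
+#if 0
+static bool sketch_voq_drained(u32 start_cons, u32 now_cons, u32 distance)
+{
+	/* Unsigned subtraction is modulo 2^32, so wraparound is safe */
+	return (now_cons - start_cons) >= distance;
+}
+#endif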
+
+static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ int rc;
+
+ rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int
+qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rel_vf_id, u32 *ack_vfs)
+{
+ struct qed_vf_info *p_vf;
+ int rc = 0;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!p_vf)
+ return 0;
+
+ if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64))) {
+ u16 vfid = p_vf->abs_vf_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Handling FLR\n", vfid);
+
+ qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+ /* If VF isn't active, no need for anything but SW */
+ if (!p_vf->b_init)
+ goto cleanup;
+
+ rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ goto cleanup;
+
+ rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
+ if (rc) {
+ DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
+ return rc;
+ }
+
+ /* VF_STOPPED has to be set only after final cleanup
+ * but prior to re-enabling the VF.
+ */
+ p_vf->state = VF_STOPPED;
+
+ rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+ if (rc) {
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ vfid);
+ return rc;
+ }
+cleanup:
+ /* Mark VF for ack and clean pending state */
+ if (p_vf->state == VF_RESET)
+ p_vf->state = VF_STOPPED;
+ ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ }
+
+ return rc;
+}
+
+int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ int rc = 0;
+ u16 i;
+
+ memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ /* Since BRB <-> PRS interface can't be tested as part of the flr
+ * polling due to HW limitations, simply sleep a bit. And since
+ * there's no need to wait per-vf, do it before looping.
+ */
+ msleep(100);
+
+ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
+ qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+ rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+ u16 i, found = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "[%08x,...,%08x]: %08x\n",
+ i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+ if (!p_hwfn->cdev->p_iov_info) {
+ DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
+ return 0;
+ }
+
+ /* Mark VFs */
+ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
+ struct qed_vf_info *p_vf;
+ u8 vfid;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
+ if (!p_vf)
+ continue;
+
+ vfid = p_vf->abs_vf_id;
+ if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+ u16 rel_vf_id = p_vf->relative_vf_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] [rel %d] got FLR-ed\n",
+ vfid, rel_vf_id);
+
+ p_vf->state = VF_RESET;
+
+ /* No need to lock here, since pending_flr should
+ * only change here and before ACKing the MFW. Since
+ * the MFW will not trigger an additional attention for
+ * VF FLR until the previous one is ACKed, we're safe.
+ */
+ p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+ found = 1;
+ }
+ }
+
+ return found;
+}
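+
+/* Illustrative sketch, not part of the original patch: pending_flr and
+ * pending_events are plain u64 arrays addressed as [id / 64] with bit
+ * (id % 64) - the open-coded bitmap idiom used throughout this file:
+ */
+#if 0
+static void sketch_vf_bitmap_set(u64 *map, u16 id)
+{
+	map[id / 64] |= 1ULL << (id % 64);
+}
+
+static bool sketch_vf_bitmap_test(const u64 *map, u16 id)
+{
+	return !!(map[id / 64] & (1ULL << (id % 64)));
+}
+
+static void sketch_vf_bitmap_clear(u64 *map, u16 id)
+{
+	map[id / 64] &= ~(1ULL << (id % 64));
+}
+#endif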
+
+static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *p_params,
+ struct qed_mcp_link_state *p_link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ if (p_params)
+ __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ if (p_link)
+ __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ if (p_caps)
+ __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+}
+
+static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int vfid)
+{
+ struct qed_iov_vf_mbx *mbx;
+ struct qed_vf_info *p_vf;
+ int i;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf)
+ return;
+
+ mbx = &p_vf->vf_mbx;
+
+ /* qed_iov_process_mbx_request */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "qed_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+
+ mbx->first_tlv = mbx->req_virt->first_tlv;
+
+ /* check if tlv type is known */
+ if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_START:
+ qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_TEARDOWN:
+ qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_RXQ:
+ qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_TXQ:
+ qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_RXQS:
+ qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_TXQS:
+ qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_RXQ:
+ qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_UPDATE:
+ qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UCAST_FILTER:
+ qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_INT_CLEANUP:
+ qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+ break;
+ }
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver was written, which
+ * supports features unknown as of yet. Too bad since we don't
+ * support them. Or this may be because someone wrote a crappy
+ * VF driver and is sending garbage over the channel.
+ */
+ DP_ERR(p_hwfn,
+ "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
+ mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
+
+ for (i = 0; i < 20; i++) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "%x ",
+ mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
+ }
+ }
+}
+
+void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ u64 add_bit = 1ULL << (vfid % 64);
+
+ p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
+ u64 *events)
+{
+ u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+ memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+ memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+}
+
+static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
+ u16 abs_vfid, struct regpair *vf_msg)
+{
+ u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ struct qed_vf_info *p_vf;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
+ abs_vfid);
+ return 0;
+ }
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+ /* List the physical address of the request so that handler
+ * could later on copy the message from it.
+ */
+ p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+
+ /* Mark the event and schedule the workqueue */
+ qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
+
+ return 0;
+}
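+
+/* Illustrative sketch, not part of the original patch: the VF request
+ * address arrives split into a hi/lo regpair and is reassembled into a
+ * 64-bit DMA address exactly as done above:
+ */
+#if 0
+static u64 sketch_regpair_to_u64(u32 hi, u32 lo)
+{
+	return ((u64)hi << 32) | lo;
+}
+#endif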
+
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode, __le16 echo, union event_ring_data *data)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
+ &data->vf_pf_channel.msg_addr);
+ default:
+ DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
+ opcode);
+ return -EINVAL;
+ }
+}
+
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+ u16 i;
+
+ if (!p_iov)
+ goto out;
+
+ for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+ if (qed_iov_is_valid_vfid(p_hwfn, i, true))
+ return i;
+
+out:
+ return MAX_NUM_VFS;
+}
+
+static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
+ int vfid)
+{
+ struct qed_dmae_params params;
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return -EINVAL;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
+ params.src_vfid = vf_info->abs_vf_id;
+
+ if (qed_dmae_host2host(p_hwfn, ptt,
+ vf_info->vf_mbx.pending_req,
+ vf_info->vf_mbx.req_phys,
+ sizeof(union vfpf_tlvs) / 4, &params)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Failed to copy message from VF 0x%02x\n", vfid);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ feature = 1 << MAC_ADDR_FORCED;
+ memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+ u16 pvid, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ feature = 1 << VLAN_ADDR_FORCED;
+ vf_info->bulletin.p_virt->pvid = pvid;
+ if (pvid)
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ else
+ vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *p_vf_info;
+
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf_info)
+ return false;
+
+ return !!p_vf_info->vport_instance;
+}
+
+bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *p_vf_info;
+
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf_info)
+ return true;
+
+ return p_vf_info->state == VF_STOPPED;
+}
+
+static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return false;
+
+ return vf_info->spoof_chk;
+}
+
+int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+{
+ struct qed_vf_info *vf;
+ int rc = -EINVAL;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set spoofchk\n");
+ goto out;
+ }
+
+ vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf)
+ goto out;
+
+ if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+ /* After VF VPORT start PF will configure spoof check */
+ vf->req_spoofchk_val = val;
+ rc = 0;
+ goto out;
+ }
+
+ rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+ return rc;
+}
+
+static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
+u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return 0;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ return 0;
+
+ return p_vf->bulletin.p_virt->pvid;
+}
+
+static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int vfid, int val)
+{
+ struct qed_vf_info *vf;
+ u8 abs_vp_id = 0;
+ int rc;
+
+ vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+ if (rc)
+ return rc;
+
+ return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+}
+
+int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+{
+ struct qed_vf_info *vf;
+ u8 vport_id;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set min rate\n");
+ return -EINVAL;
+ }
+ }
+
+ vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ vport_id = vf->vport_id;
+
+ return qed_configure_vport_wfq(cdev, vport_id, rate);
+}
+
+static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_wfq_data *vf_vp_wfq;
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return 0;
+
+ vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+ if (vf_vp_wfq->configured)
+ return vf_vp_wfq->min_speed;
+ else
+ return 0;
+}
+
+/**
+ * qed_schedule_iov - schedules IOV task for VF and PF
+ * @hwfn: hardware function pointer
+ * @flag: IOV flag for VF/PF
+ */
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
+{
+ smp_mb__before_atomic();
+ set_bit(flag, &hwfn->iov_task_flags);
+ smp_mb__after_atomic();
+ DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
+}
+
+void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i)
+ queue_delayed_work(cdev->hwfns[i].iov_wq,
+ &cdev->hwfns[i].iov_task, 0);
+}
+
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+ int i, j;
+
+ for_each_hwfn(cdev, i)
+ if (cdev->hwfns[i].iov_wq)
+ flush_workqueue(cdev->hwfns[i].iov_wq);
+
+ /* Mark VFs for disablement */
+ qed_iov_set_vfs_to_disable(cdev, true);
+
+ if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
+ pci_disable_sriov(cdev->pdev);
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ /* Failure to acquire the ptt in 100g creates an odd error
+ * where the first engine has already released IOV.
+ */
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ return -EBUSY;
+ }
+
+ /* Clean WFQ db and configure equal weight for all vports */
+ qed_clean_wfq_db(hwfn, ptt);
+
+ qed_for_each_vf(hwfn, j) {
+ int k;
+
+ if (!qed_iov_is_valid_vfid(hwfn, j, true))
+ continue;
+
+ /* Wait until VF is disabled before releasing */
+ for (k = 0; k < 100; k++) {
+ if (!qed_iov_is_vf_stopped(hwfn, j))
+ msleep(20);
+ else
+ break;
+ }
+
+ if (k < 100)
+ qed_iov_release_hw_for_vf(&cdev->hwfns[i],
+ ptt, j);
+ else
+ DP_ERR(hwfn,
+ "Timeout waiting for VF's FLR to end\n");
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ qed_iov_set_vfs_to_disable(cdev, false);
+
+ return 0;
+}
+
+static int qed_sriov_enable(struct qed_dev *cdev, int num)
+{
+ struct qed_sb_cnt_info sb_cnt_info;
+ int i, j, rc;
+
+ if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
+ DP_NOTICE(cdev, "Can start at most %d VFs\n",
+ RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
+ return -EINVAL;
+ }
+
+ /* Initialize HW for VF access */
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[j];
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+ int num_sbs = 0, limit = 16;
+
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (IS_MF_DEFAULT(hwfn))
+ limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
+
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(hwfn, &sb_cnt_info);
+ num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
+
+ for (i = 0; i < num; i++) {
+ if (!qed_iov_is_valid_vfid(hwfn, i, false))
+ continue;
+
+ rc = qed_iov_init_hw_for_vf(hwfn,
+ ptt, i, num_sbs / num);
+ if (rc) {
+ DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
+ qed_ptt_release(hwfn, ptt);
+ goto err;
+ }
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ /* Enable SRIOV PCIe functions */
+ rc = pci_enable_sriov(cdev->pdev, num);
+ if (rc) {
+ DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
+ goto err;
+ }
+
+ return num;
+
+err:
+ qed_sriov_disable(cdev, false);
+ return rc;
+}
+
+static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
+{
+ if (!IS_QED_SRIOV(cdev)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (num_vfs_param)
+ return qed_sriov_enable(cdev, num_vfs_param);
+ else
+ return qed_sriov_disable(cdev, true);
+}
+
+static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
+{
+ int i;
+
+ if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set a VF MAC; Sriov is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+ return -EINVAL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf_info)
+ continue;
+
+ /* Set the forced MAC, and schedule the IOV task */
+ ether_addr_copy(vf_info->forced_mac, mac);
+ qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+ }
+
+ return 0;
+}
+
+static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
+{
+ int i;
+
+ if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set a VF MAC; Sriov is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+ return -EINVAL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf_info)
+ continue;
+
+ /* Set the forced vlan, and schedule the IOV task */
+ vf_info->forced_vlan = vid;
+ qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+ }
+
+ return 0;
+}
+
+static int qed_get_vf_config(struct qed_dev *cdev,
+ int vf_id, struct ifla_vf_info *ivi)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_public_vf_info *vf_info;
+ struct qed_mcp_link_state link;
+ u32 tx_rate;
+
+ /* Sanitize request */
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "VF index [%d] isn't active\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+
+ qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+
+ /* Fill information about VF */
+ ivi->vf = vf_id;
+
+ if (is_valid_ether_addr(vf_info->forced_mac))
+ ether_addr_copy(ivi->mac, vf_info->forced_mac);
+ else
+ ether_addr_copy(ivi->mac, vf_info->mac);
+
+ ivi->vlan = vf_info->forced_vlan;
+ ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
+ ivi->linkstate = vf_info->link_state;
+ tx_rate = vf_info->tx_rate;
+ ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
+ ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+
+ return 0;
+}
+
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+ struct qed_mcp_link_capabilities caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ int i;
+
+ if (!hwfn->pf_iov_info)
+ return;
+
+ /* Update bulletin of all future possible VFs with link configuration */
+ for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
+ if (!vf_info)
+ continue;
+
+ memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+ sizeof(caps));
+
+ /* Modify link according to the VF's configured link state */
+ switch (vf_info->link_state) {
+ case IFLA_VF_LINK_STATE_DISABLE:
+ link.link_up = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ link.link_up = true;
+ /* Set speed according to the maximum supported by HW -
+ * that is 40G for regular devices and 100G for CMT
+ * mode devices.
+ */
+ link.speed = (hwfn->cdev->num_hwfns > 1) ?
+ 100000 : 40000;
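+ /* fall through - default only breaks */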
+ default:
+ /* In auto mode pass PF link image to VF */
+ break;
+ }
+
+ if (link.link_up && vf_info->tx_rate) {
+ struct qed_ptt *ptt;
+ int rate;
+
+ rate = min_t(int, vf_info->tx_rate, link.speed);
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed to acquire PTT\n");
+ return;
+ }
+
+ if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
+ vf_info->tx_rate = rate;
+ link.speed = rate;
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ qed_iov_set_link(hwfn, i, &params, &link, &caps);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+}
+
+static int qed_set_vf_link_state(struct qed_dev *cdev,
+ int vf_id, int link_state)
+{
+ int i;
+
+ /* Sanitize request */
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "VF index [%d] isn't active\n", vf_id);
+ return -EINVAL;
+ }
+
+ /* Handle configuration of link state */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+ if (!vf)
+ continue;
+
+ if (vf->link_state == link_state)
+ continue;
+
+ vf->link_state = link_state;
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
+ }
+
+ return 0;
+}
+
+static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
+{
+ int i, rc = -EINVAL;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set tx rate\n");
+ return -EINVAL;
+ }
+
+ vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ vf->tx_rate = rate;
+
+ qed_inform_vf_link_state(p_hwfn);
+ }
+
+ return 0;
+}
+
+static int qed_set_vf_rate(struct qed_dev *cdev,
+ int vfid, u32 min_rate, u32 max_rate)
+{
+ int rc_min = 0, rc_max = 0;
+
+ if (max_rate)
+ rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
+
+ if (min_rate)
+ rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
+
+ if (rc_max | rc_min)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
+{
+ u64 events[QED_VF_ARRAY_LENGTH];
+ struct qed_ptt *ptt;
+ int i;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Can't acquire PTT; re-scheduling\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
+ return;
+ }
+
+ qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
+ events[0], events[1], events[2]);
+
+ qed_for_each_vf(hwfn, i) {
+ /* Skip VFs with no pending messages */
+ if (!(events[i / 64] & (1ULL << (i % 64))))
+ continue;
+
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
+ i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ /* Copy VF's message to PF's request buffer for that VF */
+ if (qed_iov_copy_vf_msg(hwfn, ptt, i))
+ continue;
+
+ qed_iov_process_mbx_req(hwfn, ptt, i);
+ }
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
+{
+ int i;
+
+ qed_for_each_vf(hwfn, i) {
+ struct qed_public_vf_info *info;
+ bool update = false;
+ u8 *mac;
+
+ info = qed_iov_get_public_vf_info(hwfn, i, true);
+ if (!info)
+ continue;
+
+ /* Update data on bulletin board */
+ mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
+ if (is_valid_ether_addr(info->forced_mac) &&
+ (!mac || !ether_addr_equal(mac, info->forced_mac))) {
+ DP_VERBOSE(hwfn,
+ QED_MSG_IOV,
+ "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
+ i,
+ hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ /* Update bulletin board with forced MAC */
+ qed_iov_bulletin_set_forced_mac(hwfn,
+ info->forced_mac, i);
+ update = true;
+ }
+
+ if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
+ info->forced_vlan) {
+ DP_VERBOSE(hwfn,
+ QED_MSG_IOV,
+ "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
+ info->forced_vlan,
+ i,
+ hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+ qed_iov_bulletin_set_forced_vlan(hwfn,
+ info->forced_vlan, i);
+ update = true;
+ }
+
+ if (update)
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+}
+
+static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
+{
+ struct qed_ptt *ptt;
+ int i;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ return;
+ }
+
+ qed_for_each_vf(hwfn, i)
+ qed_iov_post_vf_bulletin(hwfn, i, ptt);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+void qed_iov_pf_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ iov_task.work);
+ int rc;
+
+ if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+ return;
+
+ if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ if (!ptt) {
+ qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+ return;
+ }
+
+ rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
+ if (rc)
+ qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
+ qed_handle_vf_msg(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ &hwfn->iov_task_flags))
+ qed_handle_pf_set_vf_unicast(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ &hwfn->iov_task_flags))
+ qed_handle_bulletin_post(hwfn);
+}
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].iov_wq)
+ continue;
+
+ if (schedule_first) {
+ qed_schedule_iov(&cdev->hwfns[i],
+ QED_IOV_WQ_STOP_WQ_FLAG);
+ cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
+ }
+
+ flush_workqueue(cdev->hwfns[i].iov_wq);
+ destroy_workqueue(cdev->hwfns[i].iov_wq);
+ }
+}
+
+int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ char name[NAME_SIZE];
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ /* PFs need a dedicated workqueue only if they support IOV.
+ * VFs always require one.
+ */
+ if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
+ continue;
+
+ snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
+ cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
+
+ p_hwfn->iov_wq = create_singlethread_workqueue(name);
+ if (!p_hwfn->iov_wq) {
+ DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
+ return -ENOMEM;
+ }
+
+ if (IS_PF(cdev))
+ INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
+ else
+ INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
+ }
+
+ return 0;
+}
+
+const struct qed_iov_hv_ops qed_iov_ops_pass = {
+ .configure = &qed_sriov_configure,
+ .set_mac = &qed_sriov_pf_set_mac,
+ .set_vlan = &qed_sriov_pf_set_vlan,
+ .get_config = &qed_get_vf_config,
+ .set_link_state = &qed_set_vf_link_state,
+ .set_spoof = &qed_spoof_configure,
+ .set_rate = &qed_set_vf_rate,
+};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
new file mode 100644
index 000000000..c90b2b6ad
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -0,0 +1,388 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SRIOV_H
+#define _QED_SRIOV_H
+#include <linux/types.h>
+#include "qed_vf.h"
+#define QED_VF_ARRAY_LENGTH (3)
+
+#ifdef CONFIG_QED_SRIOV
+#define IS_VF(cdev) ((cdev)->b_is_vf)
+#define IS_PF(cdev) (!((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
+#else
+#define IS_VF(cdev) (0)
+#define IS_PF(cdev) (1)
+#define IS_PF_SRIOV(p_hwfn) (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
+
+#define QED_MAX_VF_CHAINS_PER_PF 16
+#define QED_ETH_VF_NUM_VLAN_FILTERS 2
+
+#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
+ (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
+
+enum qed_iov_vport_update_flag {
+ QED_IOV_VP_UPDATE_ACTIVATE,
+ QED_IOV_VP_UPDATE_VLAN_STRIP,
+ QED_IOV_VP_UPDATE_TX_SWITCH,
+ QED_IOV_VP_UPDATE_MCAST,
+ QED_IOV_VP_UPDATE_ACCEPT_PARAM,
+ QED_IOV_VP_UPDATE_RSS,
+ QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
+ QED_IOV_VP_UPDATE_SGE_TPA,
+ QED_IOV_VP_UPDATE_MAX,
+};
+
+struct qed_public_vf_info {
+ /* These copies will later be reflected in the bulletin board,
+ * but this copy should be newer.
+ */
+ u8 forced_mac[ETH_ALEN];
+ u16 forced_vlan;
+ u8 mac[ETH_ALEN];
+
+ /* IFLA_VF_LINK_STATE_<X> */
+ int link_state;
+
+ /* Currently configured Tx rate in MB/sec. 0 if unconfigured */
+ int tx_rate;
+};
+
+/* This struct is part of qed_dev and contains data relevant to all hwfns;
+ * Initialized only if the SR-IOV capability is exposed in PCIe config space.
+ */
+struct qed_hw_sriov_info {
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total_vfs; /* total VFs associated with the PF */
+ u16 num_vfs; /* number of vfs that have been started */
+ u16 initial_vfs; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u16 vf_device_id; /* VF device id */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+
+ u32 first_vf_in_pf;
+};
+
+/* This mailbox is maintained per VF in its PF and contains all the
+ * information required for sending / receiving a message.
+ */
+struct qed_iov_vf_mbx {
+ union vfpf_tlvs *req_virt;
+ dma_addr_t req_phys;
+ union pfvf_tlvs *reply_virt;
+ dma_addr_t reply_phys;
+
+ /* Address in VF where a pending message is located */
+ dma_addr_t pending_req;
+
+ u8 *offset;
+
+ /* saved VF request header */
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct qed_vf_q_info {
+ u16 fw_rx_qid;
+ u16 fw_tx_qid;
+ u8 fw_cid;
+ u8 rxq_active;
+ u8 txq_active;
+};
+
+enum vf_state {
+ VF_FREE = 0, /* VF ready to be acquired - holds no resc */
+ VF_ACQUIRED, /* VF, acquired, but not initialized */
+ VF_ENABLED, /* VF, Enabled */
+ VF_RESET, /* VF, FLR'd, pending cleanup */
+ VF_STOPPED /* VF, Stopped */
+};
+
+struct qed_vf_vlan_shadow {
+ bool used;
+ u16 vid;
+};
+
+struct qed_vf_shadow_config {
+ /* Shadow copy of all guest vlans */
+ struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+ u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+struct qed_vf_info {
+ struct qed_iov_vf_mbx vf_mbx;
+ enum vf_state state;
+ bool b_init;
+ u8 to_disable;
+
+ struct qed_bulletin bulletin;
+ dma_addr_t vf_bulletin;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 relative_vf_id;
+ u8 abs_vf_id;
+#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
+ (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+ (p_vf)->abs_vf_id)
+
+ u8 vport_instance;
+ u8 num_rxqs;
+ u8 num_txqs;
+
+ u8 num_sbs;
+
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
+ u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
+ u8 num_active_rxqs;
+ struct qed_public_vf_info p_vf_info;
+ bool spoof_chk;
+ bool req_spoofchk_val;
+
+ /* Stores the configuration requested by VF */
+ struct qed_vf_shadow_config shadow_config;
+
+ /* A bitfield using bulletin's valid-map bits, used to indicate
+ * which of the bulletin board features have been configured.
+ */
+ u64 configured_features;
+#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
+ (1 << VLAN_ADDR_FORCED))
+};
+
+/* This structure is part of qed_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct qed_pf_iov {
+ struct qed_vf_info vfs_array[MAX_NUM_VFS];
+ u64 pending_events[QED_VF_ARRAY_LENGTH];
+ u64 pending_flr[QED_VF_ARRAY_LENGTH];
+
+ /* Allocate the message buffers contiguously and split them among the VFs */
+ void *mbx_msg_virt_addr;
+ dma_addr_t mbx_msg_phys_addr;
+ u32 mbx_msg_size;
+ void *mbx_reply_virt_addr;
+ dma_addr_t mbx_reply_phys_addr;
+ u32 mbx_reply_size;
+ void *p_bulletins;
+ dma_addr_t bulletins_phys;
+ u32 bulletins_size;
+};
+
+enum qed_iov_wq_flag {
+ QED_IOV_WQ_MSG_FLAG,
+ QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ QED_IOV_WQ_STOP_WQ_FLAG,
+ QED_IOV_WQ_FLR_FLAG,
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief - Given a VF index, return index of next [including that] active VF.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
+ */
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief Read sriov-related information and allocate resources;
+ * reads from configuration space, shmem, etc.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
+ *
+ * @param p_hwfn
+ * @param p_iov
+ * @param type
+ * @param length
+ *
+ * @return pointer to the newly placed tlv
+ */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
+
+/**
+ * @brief list the types and lengths of the tlvs on the buffer
+ *
+ * @param p_hwfn
+ * @param tlvs_list
+ */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
+
+/**
+ * @brief qed_iov_alloc - allocate sriov related resources
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_iov_setup - setup sriov related resources
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_iov_free - free sriov related resources
+ *
+ * @param p_hwfn
+ */
+void qed_iov_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief free sriov related memory that was allocated during hw_prepare
+ *
+ * @param cdev
+ */
+void qed_iov_free_hw_info(struct qed_dev *cdev);
+
+/**
+ * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode, __le16 echo, union event_ring_data *data);
+
+/**
+ * @brief Mark structs of vfs that have been FLR-ed.
+ *
+ * @param p_hwfn
+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ *
+ * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ */
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type);
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
+int qed_iov_wq_start(struct qed_dev *cdev);
+
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
+void qed_vf_start_iov_wq(struct qed_dev *cdev);
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
+#else
+static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return MAX_NUM_VFS;
+}
+
+static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+}
+
+static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+}
+
+static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo, union event_ring_data *data)
+{
+ return -EINVAL;
+}
+
+static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+ u32 *disabled_vfs)
+{
+ return 0;
+}
+
+static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+}
+
+static inline int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ return 0;
+}
+
+static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
+ enum qed_iov_wq_flag flag)
+{
+}
+
+static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+ return 0;
+}
+
+static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+}
+#endif
+
+#define qed_for_each_vf(_p_hwfn, _i) \
+ for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
+ _i < MAX_NUM_VFS; \
+ _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
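+
+/* Example usage (illustrative, not part of the original patch) -
+ * iterate every currently-active VF of a hwfn:
+ *
+ *	int i;
+ *
+ *	qed_for_each_vf(p_hwfn, i)
+ *		qed_iov_post_vf_bulletin(p_hwfn, i, p_ptt);
+ */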
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
new file mode 100644
index 000000000..72e69c0ec
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -0,0 +1,1102 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include "qed.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ void *p_tlv;
+
+ /* This lock is released when we receive PF's response
+ * in qed_send_msg2pf().
+ * So, qed_vf_pf_prep() and qed_send_msg2pf()
+ * must come in sequence.
+ */
+ mutex_lock(&(p_iov->mutex));
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "preparing to send 0x%04x tlv over vf pf channel\n",
+ type);
+
+ /* Reset Request offset */
+ p_iov->offset = (u8 *)p_iov->vf2pf_request;
+
+ /* Clear mailbox - both request and reply */
+ memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+ /* Init type and length */
+ p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
+
+ /* Init first tlv header */
+ ((struct vfpf_first_tlv *)p_tlv)->reply_address =
+ (u64)p_iov->pf2vf_reply_phys;
+
+ return p_tlv;
+}
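+
+/* Typical usage of the prep/send pair (illustrative, mirroring
+ * qed_vf_pf_acquire() below): qed_vf_pf_prep() takes the channel mutex
+ * and places the first TLV, the caller appends its request TLVs plus a
+ * CHANNEL_TLV_LIST_END terminator via qed_add_tlv(), and
+ * qed_send_msg2pf() releases the mutex once the PF responds.
+ */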
+
+static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+{
+ union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+ struct ustorm_trigger_vf_zone trigger;
+ struct ustorm_vf_zone *zone_data;
+ int rc = 0, time = 100;
+
+ zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
+
+ /* output tlvs list */
+ qed_dp_tlv_list(p_hwfn, p_req);
+
+ /* need to add the END TLV to the message size */
+ resp_size += sizeof(struct channel_list_end_tlv);
+
+ /* Send TLVs over HW channel */
+ memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+ trigger.vf_pf_msg_valid = 1;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
+ GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID),
+ upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ &zone_data->non_trigger.vf_pf_msg_addr,
+ *((u32 *)&trigger), &zone_data->trigger);
+
+ REG_WR(p_hwfn,
+ (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
+ lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ REG_WR(p_hwfn,
+ (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
+ upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ /* The message data must be written first, to prevent trigger before
+ * data is written.
+ */
+ wmb();
+
+ REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
+
+ /* When the PF is done with the response, it writes back to the
+ * `done' address. Poll until then.
+ */
+ while ((!*done) && time) {
+ msleep(25);
+ time--;
+ }
+
+ if (!*done) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
+ rc = -EBUSY;
+ goto exit;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ }
+
+exit:
+ mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+
+ return rc;
+}
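+
+/* Illustrative sketch, not part of the original patch: the wmb() above
+ * implements the generic "publish payload, barrier, ring doorbell"
+ * pattern - the trigger write must not be reordered before the message
+ * address writes. The hypothetical register addresses below stand in
+ * for the real zone registers:
+ */
+#if 0
+static void sketch_ring_doorbell(void __iomem *payload, u32 val,
+				 void __iomem *bell, u32 go)
+{
+	writel(val, payload);	/* publish message data first */
+	wmb();			/* order payload before trigger */
+	writel(go, bell);	/* then ring the doorbell */
+}
+#endif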
+
+#define VF_ACQUIRE_THRESH 3
+#define VF_ACQUIRE_MAC_FILTERS 1
+
+static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ u8 rx_count = 1, tx_count = 1, num_sbs = 1;
+ u8 num_mac = VF_ACQUIRE_MAC_FILTERS;
+ bool resources_acquired = false;
+ struct vfpf_acquire_tlv *req;
+ int rc = 0, attempts = 0;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+
+ /* start filling the request */
+ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ req->resc_request.num_rxqs = rx_count;
+ req->resc_request.num_txqs = tx_count;
+ req->resc_request.num_sbs = num_sbs;
+ req->resc_request.num_mac_filters = num_mac;
+ req->resc_request.num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+
+ req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
+ req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+ req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+ req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+ req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+
+ /* Fill capability field with any non-deprecated config we support */
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = p_iov->bulletin.phys;
+ req->bulletin_size = p_iov->bulletin.size;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ while (!resources_acquired) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV, "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ /* copy acquire response from buffer to p_hwfn */
+ memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
+
+ attempts++;
+
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF agrees to allocate our resources */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
+ DP_INFO(p_hwfn,
+ "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
+ return -EINVAL;
+ }
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
+ resources_acquired = true;
+ } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF unwilling to fullfill resource request. Try PF recommended amount\n");
+
+ /* humble our request */
+ req->resc_request.num_txqs = resp->resc.num_txqs;
+ req->resc_request.num_rxqs = resp->resc.num_rxqs;
+ req->resc_request.num_sbs = resp->resc.num_sbs;
+ req->resc_request.num_mac_filters =
+ resp->resc.num_mac_filters;
+ req->resc_request.num_vlan_filters =
+ resp->resc.num_vlan_filters;
+
+ /* Clear response buffer */
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+ } else {
+ DP_ERR(p_hwfn,
+ "PF returned error %d to VF acquisition request\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+ }
+
+ /* Update bulletin board size with response from PF */
+ p_iov->bulletin.size = resp->bulletin_size;
+
+ /* get HW info */
+ p_hwfn->cdev->type = resp->pfdev_info.dev_type;
+ p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;
+
+ p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;
+
+ /* Learn of the possibility of CMT */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
+ DP_NOTICE(p_hwfn, "100g VF\n");
+ p_hwfn->cdev->num_hwfns = 2;
+ }
+ }
+
+ return 0;
+}
+
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov;
+ u32 reg;
+
+ /* Set number of hwfns - might be overridden once the leading hwfn learns
+ * actual configuration from PF.
+ */
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->num_hwfns = 1;
+
+ /* Set the doorbell bar. Assumption: regview is set */
+ p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+
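+ /* Read this VF's opaque and concrete FIDs from the ME register
+  * space in BAR0 (PXP_VF_BAR0_ME_*).
+  */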
+ reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
+
+ reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
+
+ /* Allocate vf sriov info */
+ p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_vf_iov'\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate vf2pf msg */
+ p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ &p_iov->vf2pf_request_phys,
+ GFP_KERNEL);
+ if (!p_iov->vf2pf_request) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate `vf2pf_request' DMA memory\n");
+ goto free_p_iov;
+ }
+
+ p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ &p_iov->pf2vf_reply_phys,
+ GFP_KERNEL);
+ if (!p_iov->pf2vf_reply) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate `pf2vf_reply' DMA memory\n");
+ goto free_vf2pf_request;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
+ p_iov->vf2pf_request,
+ (u64)p_iov->vf2pf_request_phys,
+ p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
+
+ /* Allocate Bulletin board */
+ p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
+ p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov->bulletin.size,
+ &p_iov->bulletin.phys,
+ GFP_KERNEL);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
+ p_iov->bulletin.p_virt,
+ (u64)p_iov->bulletin.phys, p_iov->bulletin.size);
+
+ mutex_init(&p_iov->mutex);
+
+ p_hwfn->vf_iov_info = p_iov;
+
+ p_hwfn->hw_info.personality = QED_PCI_ETH;
+
+ return qed_vf_pf_acquire(p_hwfn);
+
+free_vf2pf_request:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
+free_p_iov:
+ kfree(p_iov);
+
+ return -ENOMEM;
+}
+
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_qid,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_rxq_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->cqe_pbl_addr = cqe_pbl_addr;
+ req->cqe_pbl_size = cqe_pbl_size;
+ req->rxq_addr = bd_chain_phys_addr;
+ req->hw_sb = sb;
+ req->sb_index = sb_index;
+ req->bd_max_bytes = bd_max_bytes;
+ req->stat_id = -1;
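+ /* A stat_id of -1 presumably leaves the statistics-counter
+  * assignment to the PF (none is explicitly requested here).
+  */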
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ /* Learn the address of the producer from the response */
+ if (pp_prod) {
+ u64 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
+ rx_qid, *pp_prod, resp->offset);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ (u32 *)&init_prod_val);
+ }
+
+ return rc;
+}
+
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_rxqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->num_rxqs = 1;
+ req->cqe_completion = cqe_completion;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_start_txq_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+ req->tx_qid = tx_queue_id;
+
+ /* Tx */
+ req->pbl_addr = pbl_addr;
+ req->pbl_size = pbl_size;
+ req->hw_sb = sb;
+ req->sb_index = sb_index;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ if (pp_doorbell) {
+ u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
+
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
+ qed_db_addr(cid, DQ_DEMS_LEGACY);
+ }
+
+ return rc;
+}
+
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_txqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+ req->tx_qid = tx_qid;
+ req->num_txqs = 1;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_start_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+ req->mtu = mtu;
+ req->vport_id = vport_id;
+ req->inner_vlan_removal = inner_vlan_removal;
+ req->tpa_mode = tpa_mode;
+ req->max_buffers_per_cqe = max_buffers_per_cqe;
+ req->only_untagged = only_untagged;
+
+ /* status blocks */
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
+ if (p_hwfn->sbs_info[i])
+ req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+static bool
+qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ u16 tlv)
+{
+ switch (tlv) {
+ case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
+ return !!(p_data->update_vport_active_rx_flg ||
+ p_data->update_vport_active_tx_flg);
+ case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
+ return !!p_data->update_tx_switching_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
+ return !!p_data->update_inner_vlan_removal_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
+ return !!p_data->update_accept_any_vlan_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_MCAST:
+ return !!p_data->update_approx_mcast_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
+ return !!(p_data->accept_flags.update_rx_mode_config ||
+ p_data->accept_flags.update_tx_mode_config);
+ case CHANNEL_TLV_VPORT_UPDATE_RSS:
+ return !!p_data->rss_params;
+ case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
+ return !!p_data->sge_tpa_params;
+ default:
+ DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
+ tlv);
+ return false;
+ }
+}
+
+static void
+qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *p_resp;
+ u16 tlv;
+
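+ /* Walk every possible vport-update tlv type; for each one that was
+  * actually sent, locate its response tlv in the PF reply and log
+  * whether the configuration took effect.
+  */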
+ for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
+ if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
+ continue;
+
+ p_resp = (struct pfvf_def_resp_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
+ tlv);
+ if (p_resp && p_resp->hdr.status)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "TLV[%d] Configuration %s\n",
+ tlv,
+ (p_resp->hdr.status == PFVF_STATUS_SUCCESS) ?
+ "succeeded" : "failed");
+ }
+}
+
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_update_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ u8 update_rx, update_tx;
+ u32 resp_size = 0;
+ u16 size, tlv;
+ int rc;
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ resp_size = sizeof(*resp);
+
+ update_rx = p_params->update_vport_active_rx_flg;
+ update_tx = p_params->update_vport_active_tx_flg;
+
+ /* clear mailbox and prep header tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
+ /* Prepare extended tlvs */
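+ /* Each requested change becomes its own extended tlv, and the PF
+  * answers with one pfvf_def_resp_tlv per extended tlv - hence
+  * resp_size grows with every tlv added below.
+  */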
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
+ size = sizeof(struct vfpf_vport_update_activate_tlv);
+ p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_act_tlv->update_rx = update_rx;
+ p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+ }
+
+ if (update_tx) {
+ p_act_tlv->update_tx = update_tx;
+ p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+ }
+ }
+
+ if (p_params->update_tx_switching_flg) {
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+
+ size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+ }
+
+ if (p_params->update_approx_mcast_flg) {
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+
+ size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+ p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ memcpy(p_mcast_tlv->bins, p_params->bins,
+ sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ }
+
+ update_rx = p_params->accept_flags.update_rx_mode_config;
+ update_tx = p_params->accept_flags.update_tx_mode_config;
+
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+ p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_accept_tlv->update_rx_mode = update_rx;
+ p_accept_tlv->rx_accept_filter =
+ p_params->accept_flags.rx_accept_filter;
+ }
+
+ if (update_tx) {
+ p_accept_tlv->update_tx_mode = update_tx;
+ p_accept_tlv->tx_accept_filter =
+ p_params->accept_flags.tx_accept_filter;
+ }
+ }
+
+ if (p_params->rss_params) {
+ struct qed_rss_params *rss_params = p_params->rss_params;
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+
+ size = sizeof(struct vfpf_vport_update_rss_tlv);
+ p_rss_tlv = qed_add_tlv(p_hwfn,
+ &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (rss_params->update_rss_config)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CONFIG_FLAG;
+ if (rss_params->update_rss_capabilities)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CAPS_FLAG;
+ if (rss_params->update_rss_ind_table)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+ if (rss_params->update_rss_key)
+ p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+ p_rss_tlv->rss_enable = rss_params->rss_enable;
+ p_rss_tlv->rss_caps = rss_params->rss_caps;
+ p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+ memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
+ sizeof(rss_params->rss_ind_table));
+ memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ }
+
+ if (p_params->update_accept_any_vlan_flg) {
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+
+ size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+ p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+ p_any_vlan_tlv->update_accept_any_vlan_flg =
+ p_params->update_accept_any_vlan_flg;
+ }
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+ return rc;
+}
+
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EAGAIN;
+
+ p_hwfn->b_int_enabled = 0;
+
+ return 0;
+}
+
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = -EAGAIN;
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys);
+ if (p_iov->pf2vf_reply)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct qed_bulletin_content);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ p_iov->bulletin.p_virt, p_iov->bulletin.phys);
+ }
+
+ kfree(p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = NULL;
+
+ return rc;
+}
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd)
+{
+ struct qed_sp_vport_update_params sp_params;
+ int i;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.update_approx_mcast_flg = 1;
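+ /* Multicast filtering is approximate - each MAC is hashed into a bin
+  * (qed_mcast_bin_from_mac) and the PF is asked to program the
+  * resulting bin bitmap.
+  */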
+
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ __set_bit(bit, sp_params.bins);
+ }
+ }
+
+ qed_vf_pf_vport_update(p_hwfn, &sp_params);
+}
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_ucast)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_ucast_filter_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
+ req->opcode = (u8)p_ucast->opcode;
+ req->type = (u8)p_ucast->type;
+ memcpy(req->mac, p_ucast->mac, ETH_ALEN);
+ req->vlan = p_ucast->vlan;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EAGAIN;
+
+ return 0;
+}
+
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, "vf_iov_info isn't initialized\n");
+ return 0;
+ }
+
+ return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct qed_bulletin_content shadow;
+ u32 crc, crc_size;
+
+ crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+ *p_change = 0;
+
+ /* Need to guarantee PF is not in the middle of writing it */
+ memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+ /* If the version did not change, there is nothing to do */
+ if (shadow.version == p_iov->bulletin_shadow.version)
+ return 0;
+
+ /* Verify the bulletin we see is valid */
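+ /* The crc covers the entire structure except the crc field itself */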
+ crc = crc32(0, (u8 *)&shadow + crc_size,
+ p_iov->bulletin.size - crc_size);
+ if (crc != shadow.crc)
+ return -EAGAIN;
+
+ /* Set the shadow bulletin and process it */
+ memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Read a bulletin update %08x\n", shadow.version);
+
+ *p_change = 1;
+
+ return 0;
+}
+
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_params, 0, sizeof(*p_params));
+
+ p_params->speed.autoneg = p_bulletin->req_autoneg;
+ p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+ p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+ p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+ p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+ p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+ p_params->loopback_mode = p_bulletin->req_loopback;
+}
+
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params)
+{
+ __qed_vf_get_link_params(p_hwfn, params,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_link, 0, sizeof(*p_link));
+
+ p_link->link_up = p_bulletin->link_up;
+ p_link->speed = p_bulletin->speed;
+ p_link->full_duplex = p_bulletin->full_duplex;
+ p_link->an = p_bulletin->autoneg;
+ p_link->an_complete = p_bulletin->autoneg_complete;
+ p_link->parallel_detection = p_bulletin->parallel_detection;
+ p_link->pfc_enabled = p_bulletin->pfc_enabled;
+ p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+ p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+ p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+ p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+ p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+}
+
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link)
+{
+ __qed_vf_get_link_state(p_hwfn, link,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_link_caps, 0, sizeof(*p_link_caps));
+ p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps)
+{
+ __qed_vf_get_link_caps(p_hwfn, p_link_caps,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+ *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
+}
+
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+ memcpy(port_mac,
+ p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
+}
+
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
+{
+ struct qed_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
+}
+
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+ struct qed_bulletin_content *bulletin;
+
+ bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+ if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return true;
+
+ /* Forbid the VF from changing a MAC enforced by the PF */
+ return false;
+}
+
+bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+ u8 *dst_mac, u8 *p_is_forced)
+{
+ struct qed_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ if (p_is_forced)
+ *p_is_forced = 1;
+ } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
+ if (p_is_forced)
+ *p_is_forced = 0;
+ } else {
+ return false;
+ }
+
+ ether_addr_copy(dst_mac, bulletin->mac);
+
+ return true;
+}
+
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+ struct pf_vf_pfdev_info *info;
+
+ info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+ *fw_major = info->fw_major;
+ *fw_minor = info->fw_minor;
+ *fw_rev = info->fw_rev;
+ *fw_eng = info->fw_eng;
+}
+
+static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
+{
+ struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
+ u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
+ void *cookie = hwfn->cdev->ops_cookie;
+
+ is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
+ &is_mac_forced);
+ if (is_mac_exist && is_mac_forced && cookie)
+ ops->force_mac(cookie, mac);
+
+ /* Always update link configuration according to bulletin */
+ qed_link_update(hwfn);
+}
+
+void qed_iov_vf_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ iov_task.work);
+ u8 change = 0;
+
+ if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+ return;
+
+ /* Handle bulletin board changes */
+ qed_vf_read_bulletin(hwfn, &change);
+ if (change)
+ qed_handle_bulletin_change(hwfn);
+
+ /* As VF is polling bulletin board, need to constantly re-schedule */
+ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
new file mode 100644
index 000000000..b82fda964
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -0,0 +1,990 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_VF_H
+#define _QED_VF_H
+
+#include "qed_l2.h"
+#include "qed_mcp.h"
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10
+
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u16 padding;
+};
+
+struct hw_sb_info {
+ u16 hw_sb_id;
+ u8 sb_qid;
+ u8 padding[5];
+};
+
+#define TLV_BUFFER_SIZE 1024
+
+enum {
+ PFVF_STATUS_WAITING,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE,
+ PFVF_STATUS_FORCED,
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
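+
+/* A vf->pf request buffer is built as a tlv chain: a vfpf_first_tlv
+ * carrying the request itself, optional extended tlvs (e.g. for
+ * vport-update), and a channel_list_end_tlv terminator.
+ */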
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 padding;
+ u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
+
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
+#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+ u64 capabilities;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_revision;
+ u8 fw_engineering;
+ u32 driver_version;
+ u16 opaque_fid; /* ME register value */
+ u8 os_type; /* VFPF_ACQUIRE_OS_* value */
+ u8 padding[5];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ u64 bulletin_addr;
+ u32 bulletin_size;
+ u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+ struct channel_tlv tl;
+
+ u8 update_rss_flags;
+#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0)
+#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1)
+#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2)
+#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3)
+
+ u8 rss_enable;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
+
+struct pfvf_storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct pfvf_stats_info {
+ struct pfvf_storm_stats mstats;
+ struct pfvf_storm_stats pstats;
+ struct pfvf_storm_stats tstats;
+ struct pfvf_storm_stats ustats;
+};
+
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 mfw_ver;
+
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_rev;
+ u16 fw_eng;
+
+ u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0)
+#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */
+/* Old PF versions might mistakenly override the version-based sanity
+ * mechanism and allow a VF that can't be supported to pass the
+ * acquisition phase.
+ * To overcome this, PFs now indicate that they're past that point, and
+ * new VFs will fail probe on older PFs that don't.
+ */
+#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
+
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 os_type;
+
+ /* These should match the PF's qed_dev values */
+ u16 chip_rev;
+ u8 dev_type;
+
+ u8 padding;
+
+ struct pfvf_stats_info stats_info;
+
+ u8 port_mac[ETH_ALEN];
+ u8 padding2[2];
+ } pfdev_info;
+
+ struct pf_vf_resc {
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 padding[2];
+ } resc;
+
+ u32 bulletin_size;
+ u32 padding;
+};
+
+struct pfvf_start_queue_resp_tlv {
+ struct pfvf_tlv hdr;
+ u32 offset; /* offset to consumer/producer of queue */
+ u8 padding[4];
+};
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 rxq_addr;
+ u64 deprecated_sge_addr;
+ u64 cqe_pbl_addr;
+
+ u16 cqe_pbl_size;
+ u16 hw_sb;
+ u16 rx_qid;
+ u16 hc_rate; /* desired interrupts per sec. */
+
+ u16 bd_max_bytes;
+ u16 stat_id;
+ u8 sb_index;
+ u8 padding[3];
+};
+
+struct vfpf_start_txq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 pbl_addr;
+ u16 pbl_size;
+ u16 stat_id;
+ u16 tx_qid;
+ u16 hw_sb;
+
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 hc_rate; /* desired interrupts per sec. */
+ u8 sb_index;
+ u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 cqe_completion;
+ u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 tx_qid;
+ u8 num_txqs;
+ u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 flags;
+#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0)
+#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1)
+#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2)
+
+ u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+
+ u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+ u32 tpa_mode;
+ u16 dep1;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 inner_vlan_removal;
+
+ u8 only_untagged;
+ u8 max_buffers_per_cqe;
+
+ u8 padding[4];
+};
+
+/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
+struct vfpf_vport_update_activate_tlv {
+ struct channel_tlv tl;
+ u8 update_rx;
+ u8 update_tx;
+ u8 active_rx;
+ u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+ struct channel_tlv tl;
+ u8 tx_switching;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+ struct channel_tlv tl;
+ u8 remove_vlan;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+
+ u64 bins[8];
+};
+
+struct vfpf_vport_update_accept_param_tlv {
+ struct channel_tlv tl;
+ u8 update_rx_mode;
+ u8 update_tx_mode;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+ struct channel_tlv tl;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+
+ u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+ struct channel_tlv tl;
+
+ u16 sge_tpa_flags;
+#define VFPF_TPA_IPV4_EN_FLAG BIT(0)
+#define VFPF_TPA_IPV6_EN_FLAG BIT(1)
+#define VFPF_TPA_PKT_SPLIT_FLAG BIT(2)
+#define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3)
+#define VFPF_TPA_GRO_CONSIST_FLAG BIT(4)
+
+ u8 update_sge_tpa_flags;
+#define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0)
+#define VFPF_UPDATE_TPA_EN_FLAG BIT(1)
+#define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2)
+
+ u8 max_buffers_per_cqe;
+
+ u16 deprecated_sge_buff_size;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+
+ u8 tpa_max_aggs_num;
+ u8 padding[7];
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 opcode;
+ u8 type;
+
+ u8 mac[ETH_ALEN];
+
+ u16 vlan;
+ u16 padding[3];
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_start_rxq_tlv start_rxq;
+ struct vfpf_start_txq_tlv start_txq;
+ struct vfpf_stop_rxqs_tlv stop_rxqs;
+ struct vfpf_stop_txqs_tlv stop_txqs;
+ struct vfpf_update_rxq_tlv update_rxq;
+ struct vfpf_vport_start_tlv start_vport;
+ struct vfpf_vport_update_tlv vport_update;
+ struct vfpf_ucast_filter_tlv ucast_filter;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_def_resp_tlv default_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct tlv_buffer_size tlv_buf_size;
+ struct pfvf_start_queue_resp_tlv queue_start;
+};
+
+enum qed_bulletin_bit {
+ /* Alert the VF that a forced MAC was set by the PF */
+ MAC_ADDR_FORCED = 0,
+ /* Alert the VF that a forced VLAN was set by the PF */
+ VLAN_ADDR_FORCED = 2,
+
+ /* Indicate that `default_only_untagged' contains actual data */
+ VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+ VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+ /* Alert the VF that a suggested MAC was sent by the PF.
+ * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
+ */
+ VFPF_BULLETIN_MAC_ADDR = 5
+};
+
+struct qed_bulletin_content {
+ /* crc of the structure, to detect a read in mid-update */
+ u32 crc;
+
+ u32 version;
+
+ /* bitmap indicating which fields hold valid values */
+ u64 valid_bitmap;
+
+ /* used for MAC_ADDR or MAC_ADDR_FORCED */
+ u8 mac[ETH_ALEN];
+
+ /* If valid, 1 => only untagged Rx if no vlan is configured */
+ u8 default_only_untagged;
+ u8 padding;
+
+ /* The following is a 'copy' of qed_mcp_link_state,
+ * qed_mcp_link_params and qed_mcp_link_capabilities. Since those
+ * structs may grow in the future we can't embed them here directly;
+ * instead we duplicate all of their fields.
+ */
+ u8 req_autoneg;
+ u8 req_autoneg_pause;
+ u8 req_forced_rx;
+ u8 req_forced_tx;
+ u8 padding2[4];
+
+ u32 req_adv_speed;
+ u32 req_forced_speed;
+ u32 req_loopback;
+ u32 padding3;
+
+ u8 link_up;
+ u8 full_duplex;
+ u8 autoneg;
+ u8 autoneg_complete;
+ u8 parallel_detection;
+ u8 pfc_enabled;
+ u8 partner_tx_flow_ctrl_en;
+ u8 partner_rx_flow_ctrl_en;
+ u8 partner_adv_pause;
+ u8 sfp_tx_fault;
+ u8 padding4[6];
+
+ u32 speed;
+ u32 partner_adv_speed;
+
+ u32 capability_speed;
+
+ /* Forced vlan */
+ u16 pvid;
+ u16 padding5;
+};
+
+struct qed_bulletin {
+ dma_addr_t phys;
+ struct qed_bulletin_content *p_virt;
+ u32 size;
+};
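+
+/* The bulletin board is PF-written, VF-polled shared memory; the PF
+ * bumps `version' and recomputes `crc' on each update, and the VF
+ * validates a snapshot before consuming it (see qed_vf_read_bulletin).
+ */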
+
+enum {
+ CHANNEL_TLV_NONE, /* ends tlv sequence */
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_VPORT_START,
+ CHANNEL_TLV_VPORT_UPDATE,
+ CHANNEL_TLV_VPORT_TEARDOWN,
+ CHANNEL_TLV_START_RXQ,
+ CHANNEL_TLV_START_TXQ,
+ CHANNEL_TLV_STOP_RXQS,
+ CHANNEL_TLV_STOP_TXQS,
+ CHANNEL_TLV_UPDATE_RXQ,
+ CHANNEL_TLV_INT_CLEANUP,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_UCAST_FILTER,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+ CHANNEL_TLV_VPORT_UPDATE_RSS,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_MAX,
+
+ /* Required for iterating over vport-update tlvs.
+ * Iteration breaks if the vport-update tlvs are not sequential.
+ */
+ CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
+};
+
+/* This data is held in the qed_hwfn structure for VFs only. */
+struct qed_vf_iov {
+ union vfpf_tlvs *vf2pf_request;
+ dma_addr_t vf2pf_request_phys;
+ union pfvf_tlvs *pf2vf_reply;
+ dma_addr_t pf2vf_reply_phys;
+
+ /* Should be taken whenever the mailbox buffers are accessed */
+ struct mutex mutex;
+ u8 *offset;
+
+ /* Bulletin Board */
+ struct qed_bulletin bulletin;
+ struct qed_bulletin_content bulletin_shadow;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief Read the VF bulletin and act on it if needed
+ *
+ * @param p_hwfn
+ * @param p_change - qed sets it to 1 if the bulletin board has changed, 0 otherwise.
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
+
+/**
+ * @brief Get link parameters for VF from qed
+ *
+ * @param p_hwfn
+ * @param params - the link params structure to be filled for the VF
+ */
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params);
+
+/**
+ * @brief Get link state for VF from qed
+ *
+ * @param p_hwfn
+ * @param link - the link state structure to be filled for the VF
+ */
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link);
+
+/**
+ * @brief Get link capabilities for VF from qed
+ *
+ * @param p_hwfn
+ * @param p_link_caps - the link capabilities structure to be filled for the VF
+ */
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_rxqs - allocated RX queues
+ */
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
+
+/**
+ * @brief Get port mac address for VF
+ *
+ * @param p_hwfn
+ * @param port_mac - destination location for port mac
+ */
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
+
+/**
+ * @brief Get number of VLAN filters allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_vlan_filters - allocated VLAN filters
+ */
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters);
+
+/**
+ * @brief Check if VF can set a MAC address
+ *
+ * @param p_hwfn
+ * @param mac
+ *
+ * @return bool
+ */
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
+
+/**
+ * @brief Set firmware version information in dev_info from the VF's acquire response tlv
+ *
+ * @param p_hwfn
+ * @param fw_major
+ * @param fw_minor
+ * @param fw_rev
+ * @param fw_eng
+ */
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng);
+
+/**
+ * @brief hw preparation for VF
+ * sends ACQUIRE message
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - start the RX Queue by sending a message to the PF
+ * @param p_hwfn
+ * @param rx_queue_id - zero based within the VF
+ * @param sb - VF status block for this queue
+ * @param sb_index - Index within the status block
+ * @param bd_max_bytes - maximum number of bytes per bd
+ * @param bd_chain_phys_addr - physical address of bd chain
+ * @param cqe_pbl_addr - physical address of pbl
+ * @param cqe_pbl_size - pbl size
+ * @param pp_prod - pointer to the producer to be
+ * used in fastpath
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod);
+
+/**
+ * @brief VF - start the TX queue by sending a message to the
+ * PF.
+ *
+ * @param p_hwfn
+ * @param tx_queue_id - zero based within the VF
+ * @param sb - status block for this queue
+ * @param sb_index - index within the status block
+ * @param pbl_addr - physical address of the tx queue PBL
+ * @param pbl_size - PBL size
+ * @param pp_doorbell - pointer to the address to which the
+ * doorbell should be written
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell);
+
+/**
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param rx_qid
+ * @param cqe_completion
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_qid, bool cqe_completion);
+
+/**
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param tx_qid
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
+
+/**
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return int
+ */
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params);
+
+/**
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - free the VF's memories
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ * sb_id. For VFs, IGU SBs don't have to be contiguous
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief qed_vf_pf_vport_start - perform vport start for VF.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param mtu
+ * @param inner_vlan_removal
+ * @param tpa_mode
+ * @param max_buffers_per_cqe
+ * @param only_untagged - default behavior regarding vlan acceptance
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged);
+
+/**
+ * @brief qed_vf_pf_vport_stop - stop the VF's vport
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_param);
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd);
+
+/**
+ * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link params in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_params - pointer to a struct to fill with link params
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link state in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link state
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link capabilities in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link capabilities
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin);
+
+void qed_iov_vf_task(struct work_struct *work);
+#else
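+/* CONFIG_QED_SRIOV is not set - provide no-op/failing stubs so that
+ * callers do not need ifdefs.
+ */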
+static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params)
+{
+}
+
+static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link)
+{
+}
+
+static inline void
+qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps)
+{
+}
+
+static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+}
+
+static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+}
+
+static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters)
+{
+}
+
+static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+ return false;
+}
+
+static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+}
+
+static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_adr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_qid, bool cqe_completion)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+ return -EINVAL;
+}
+
+static inline int
+qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ return 0;
+}
+
+static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe,
+ u8 only_untagged)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_param)
+{
+ return -EINVAL;
+}
+
+static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd)
+{
+}
+
+static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params
+ *p_params,
+ struct qed_bulletin_content
+ *p_bulletin)
+{
+}
+
+static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content
+ *p_bulletin)
+{
+}
+
+static inline void
+__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin)
+{
+}
+
+static inline void qed_iov_vf_task(struct work_struct *work)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index d02325154..47d6b2225 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -25,15 +25,13 @@
#define QEDE_MAJOR_VERSION 8
#define QEDE_MINOR_VERSION 7
-#define QEDE_REVISION_VERSION 0
-#define QEDE_ENGINEERING_VERSION 0
+#define QEDE_REVISION_VERSION 1
+#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
__stringify(QEDE_ENGINEERING_VERSION)
-#define QEDE_ETH_INTERFACE_VERSION 300
-
#define DRV_MODULE_SYM qede
struct qede_stats {
@@ -61,16 +59,16 @@ struct qede_stats {
/* port */
u64 rx_64_byte_packets;
- u64 rx_127_byte_packets;
- u64 rx_255_byte_packets;
- u64 rx_511_byte_packets;
- u64 rx_1023_byte_packets;
- u64 rx_1518_byte_packets;
- u64 rx_1522_byte_packets;
- u64 rx_2047_byte_packets;
- u64 rx_4095_byte_packets;
- u64 rx_9216_byte_packets;
- u64 rx_16383_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -114,6 +112,10 @@ struct qede_dev {
u32 dp_module;
u8 dp_level;
+ u32 flags;
+#define QEDE_FLAG_IS_VF BIT(0)
+#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
+
const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info;
@@ -156,6 +158,10 @@ struct qede_dev {
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
struct qede_stats stats;
+#define QEDE_RSS_INDIR_INITED BIT(0)
+#define QEDE_RSS_KEY_INITED BIT(1)
+#define QEDE_RSS_CAPS_INITED BIT(2)
+ u32 rss_params_inited; /* bit-field to track initialized rss params */
struct qed_update_vport_rss_params rss_params;
u16 q_num_rx_buffers; /* Must be a power of two */
u16 q_num_tx_buffers; /* Must be a power of two */
@@ -167,6 +173,8 @@ struct qede_dev {
bool accept_any_vlan;
struct delayed_work sp_task;
unsigned long sp_flags;
+ u16 vxlan_dst_port;
+ u16 geneve_dst_port;
};
enum QEDE_STATE {
@@ -286,8 +294,11 @@ struct qede_fastpath {
#define QEDE_CSUM_ERROR BIT(0)
#define QEDE_CSUM_UNNECESSARY BIT(1)
+#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
-#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_VXLAN_PORT_CONFIG 2
+#define QEDE_SP_GENEVE_PORT_CONFIG 3
union qede_reload_args {
u16 mtu;
@@ -301,6 +312,10 @@ void qede_reload(struct qede_dev *edev,
union qede_reload_args *args);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
+bool qede_has_rx_work(struct qede_rx_queue *rxq);
+int qede_txq_has_work(struct qede_tx_queue *txq);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
+ u8 count);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index c49dc10ce..ad3cae3b7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -9,6 +9,7 @@
#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
@@ -27,6 +28,9 @@
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
{QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+
+#define QEDE_SELFTEST_POLL_COUNT 100
+
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
@@ -59,16 +63,16 @@ static const struct {
QEDE_STAT(tx_bcast_pkts),
QEDE_PF_STAT(rx_64_byte_packets),
- QEDE_PF_STAT(rx_127_byte_packets),
- QEDE_PF_STAT(rx_255_byte_packets),
- QEDE_PF_STAT(rx_511_byte_packets),
- QEDE_PF_STAT(rx_1023_byte_packets),
- QEDE_PF_STAT(rx_1518_byte_packets),
- QEDE_PF_STAT(rx_1522_byte_packets),
- QEDE_PF_STAT(rx_2047_byte_packets),
- QEDE_PF_STAT(rx_4095_byte_packets),
- QEDE_PF_STAT(rx_9216_byte_packets),
- QEDE_PF_STAT(rx_16383_byte_packets),
+ QEDE_PF_STAT(rx_65_to_127_byte_packets),
+ QEDE_PF_STAT(rx_128_to_255_byte_packets),
+ QEDE_PF_STAT(rx_256_to_511_byte_packets),
+ QEDE_PF_STAT(rx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
+ QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
+ QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
+ QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
+ QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
+ QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
QEDE_PF_STAT(tx_64_byte_packets),
QEDE_PF_STAT(tx_65_to_127_byte_packets),
QEDE_PF_STAT(tx_128_to_255_byte_packets),
@@ -116,11 +120,39 @@ static const struct {
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+enum {
+ QEDE_PRI_FLAG_CMT,
+ QEDE_PRI_FLAG_LEN,
+};
+
+static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+ "Coupled-Function",
+};
+
+enum qede_ethtool_tests {
+ QEDE_ETHTOOL_INT_LOOPBACK,
+ QEDE_ETHTOOL_INTERRUPT_TEST,
+ QEDE_ETHTOOL_MEMORY_TEST,
+ QEDE_ETHTOOL_REGISTER_TEST,
+ QEDE_ETHTOOL_CLOCK_TEST,
+ QEDE_ETHTOOL_TEST_MAX
+};
+
+static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
+ "Internal loopback (offline)",
+ "Interrupt (online)\t",
+ "Memory (online)\t\t",
+ "Register (online)\t",
+ "Clock (online)\t\t",
+};
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
int i, j, k;
for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ continue;
strcpy(buf + j * ETH_GSTRING_LEN,
qede_stats_arr[i].string);
j++;
@@ -139,6 +171,14 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
case ETH_SS_STATS:
qede_get_strings_stats(edev, buf);
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(buf, qede_private_arr,
+ ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+ break;
+ case ETH_SS_TEST:
+ memcpy(buf, qede_tests_str_arr,
+ ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+ break;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
@@ -156,8 +196,11 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock);
- for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+ for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
+ if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
+ continue;
buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+ }
for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
buf[cnt] = 0;
@@ -176,8 +219,21 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
- return num_stats + QEDE_NUM_RQSTATS;
+ if (IS_VF(edev)) {
+ int i;
+
+ for (i = 0; i < QEDE_NUM_STATS; i++)
+ if (qede_stats_arr[i].pf_only)
+ num_stats--;
+ }
+ return num_stats + QEDE_NUM_RQSTATS;
+ case ETH_SS_PRIV_FLAGS:
+ return QEDE_PRI_FLAG_LEN;
+ case ETH_SS_TEST:
+ if (!IS_VF(edev))
+ return QEDE_ETHTOOL_TEST_MAX;
+ else
+ return 0;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
@@ -185,6 +241,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
}
}
+static u32 qede_get_priv_flags(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return (!!(edev->dev_info.common.num_hwfns > 1)) << QEDE_PRI_FLAG_CMT;
+}
+
static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -217,9 +280,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct qed_link_params params;
u32 speed;
- if (!edev->dev_info.common.is_mf_default) {
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
- "Link parameters can not be changed in non-default mode\n");
+ "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -328,6 +391,12 @@ static int qede_nway_reset(struct net_device *dev)
struct qed_link_output current_link;
struct qed_link_params link_params;
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev,
+ "Link settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+
if (!netif_running(dev))
return 0;
@@ -428,9 +497,9 @@ static int qede_set_pauseparam(struct net_device *dev,
struct qed_link_params params;
struct qed_link_output current_link;
- if (!edev->dev_info.common.is_mf_default) {
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
- "Pause parameters can not be updated in non-default mode\n");
+ "Pause settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -569,6 +638,497 @@ static int qede_set_phys_id(struct net_device *dev,
return 0;
}
+static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
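/* [Editorial note] In the RXH_* vocabulary above, RXH_IP_SRC | RXH_IP_DST is
 * the 2-tuple (addresses-only) hash, while adding RXH_L4_B_0_1 | RXH_L4_B_2_3
 * (the L4 source and destination ports) yields the 4-tuple hash that is
 * reported unconditionally for TCP and, per rss_caps, conditionally for UDP.
 */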
+
+static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = edev->num_rss;
+ return 0;
+ case ETHTOOL_GRXFH:
+ return qede_get_rss_flags(edev, info);
+ default:
+ DP_ERR(edev, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ struct qed_update_vport_params vport_update_params;
+ u8 set_caps = 0, clr_caps = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* For TCP only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case UDP_V4_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ /* For IP only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /* No action is needed if there is no change in the rss capability */
+ if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
+ ~clr_caps) | set_caps))
+ return 0;
+
+ /* Update internal configuration */
+ edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
+ set_caps;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+
+ /* Re-configure if possible */
+ if (netif_running(edev->ndev)) {
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ return edev->ops->vport_update(edev->cdev,
+ &vport_update_params);
+ }
+
+ return 0;
+}
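/* [Editorial sketch, not part of the original patch] The "info->data ^ mask"
 * tests above are exact-match checks: the XOR is non-zero iff any requested
 * hash field differs from the supported set. A self-contained illustration
 * of the idiom in plain C:
 */
#include <stdbool.h>
#include <stdint.h>

static bool hash_fields_match_exactly(uint64_t requested, uint64_t supported)
{
	/* A non-zero XOR means at least one bit differs in either direction */
	return (requested ^ supported) == 0;
}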
+
+static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return qede_set_rss_flags(edev, info);
+ default:
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 qede_get_rxfh_indir_size(struct net_device *dev)
+{
+ return QED_RSS_IND_TABLE_SIZE;
+}
+
+static u32 qede_get_rxfh_key_size(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return sizeof(edev->rss_params.rss_key);
+}
+
+static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ indir[i] = edev->rss_params.rss_ind_table[i];
+
+ if (key)
+ memcpy(key, edev->rss_params.rss_key,
+ qede_get_rxfh_key_size(dev));
+
+ return 0;
+}
+
+static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!indir && !key)
+ return 0;
+
+ if (indir) {
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ edev->rss_params.rss_ind_table[i] = indir[i];
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (key) {
+ memcpy(&edev->rss_params.rss_key, key,
+ qede_get_rxfh_key_size(dev));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ if (netif_running(edev->ndev)) {
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ return edev->ops->vport_update(edev->cdev,
+ &vport_update_params);
+ }
+
+ return 0;
+}
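/* [Editorial note] Both RXFH setters above follow one pattern: update the
 * cached edev->rss_params, mark the piece initialized via rss_params_inited,
 * and push a vport update only while the interface is running; otherwise
 * qede_start_queues() replays the cached state on the next load.
 */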
+
+/* This function enables interrupt generation and NAPI on the device */
+static void qede_netif_start(struct qede_dev *edev)
+{
+ int i;
+
+ if (!netif_running(edev->ndev))
+ return;
+
+ for_each_rss(i) {
+ /* Update and reenable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+/* This function disables NAPI and interrupt generation on the device */
+static void qede_netif_stop(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ napi_disable(&edev->fp_array[i].napi);
+ /* Disable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
+ }
+}
+
+static int qede_selftest_transmit_traffic(struct qede_dev *edev,
+ struct sk_buff *skb)
+{
+ struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+ struct eth_tx_1st_bd *first_bd;
+ dma_addr_t mapping;
+ int i, idx, val;
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ txq->sw_tx_ring[idx].skb = skb;
+ first_bd = qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+ first_bd->data.bd_flags.bitfields = val;
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ return -ENOMEM;
+ }
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = 1;
+ txq->sw_tx_prod++;
+ /* 'next page' entries are counted in the producer value */
+ val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+ txq->tx_db.data.bd_prod = val;
+
+ /* wmb makes sure the BD data is updated before the producer is
+ * updated; otherwise FW may read stale data from the BDs.
+ */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+ /* mmiowb is needed to synchronize doorbell writes from more than one
+ * processor. It guarantees that the write reaches the device before the
+ * queue lock is released and another start_xmit is called (possibly on
+ * another CPU). Without this barrier, the next doorbell could bypass this
+ * one. This applies to IA64/Altix systems.
+ */
+ mmiowb();
+
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_txq_has_work(txq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_txq_has_work(txq)) {
+ DP_NOTICE(edev, "Tx completion didn't happen\n");
+ return -1;
+ }
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ txq->sw_tx_cons++;
+ txq->sw_tx_ring[idx].skb = NULL;
+
+ return 0;
+}
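/* [Editorial note] The ordering above is: (1) write the BD, (2) wmb() so the
 * BD contents are visible before the producer update, (3) ring the doorbell
 * with writel(), (4) mmiowb() to order doorbells across CPUs. This selftest
 * path is the single-BD, polled variant of the qede_start_xmit() fast path.
 */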
+
+static int qede_selftest_receive_traffic(struct qede_dev *edev)
+{
+ struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
+ u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+ u8 *data_ptr;
+ int i;
+
+ /* The packet is expected to be received on rx-queue 0 even though RSS
+ * is enabled. This is because queue 0 is configured as the default
+ * queue and the loopback traffic is not IP.
+ */
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_has_rx_work(rxq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_has_rx_work(rxq)) {
+ DP_NOTICE(edev, "Failed to receive the traffic\n");
+ return -1;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+ /* Memory barrier to prevent the CPU from speculatively reading the
+ * CQE/BD before reading hw_comp_cons. If the CQE were read first, and FW
+ * then wrote the CQE and SB before the CPU read hw_comp_cons, the CPU
+ * would consume a stale CQE.
+ */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset + sw_rx_data->page_offset);
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ DP_NOTICE(edev, "Loopback test failed\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ return -1;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+
+ return 0;
+}
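/* [Editorial sketch, not part of the original patch] The byte-pattern check
 * above mirrors the fill loop in qede_selftest_run_loopback() below, where
 * payload byte i is (i & 0xff). A standalone checker for the same pattern:
 */
#include <stdbool.h>
#include <stddef.h>

static bool loopback_pattern_ok(const unsigned char *pkt, size_t hdr_len,
				size_t pkt_len)
{
	size_t i;

	/* Skip the Ethernet header; every payload byte must be i & 0xff */
	for (i = hdr_len; i < pkt_len; i++)
		if (pkt[i] != (unsigned char)(i & 0xff))
			return false;

	return true;
}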
+
+static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
+{
+ struct qed_link_params link_params;
+ struct sk_buff *skb = NULL;
+ int rc = 0, i;
+ u32 pkt_size;
+ u8 *packet;
+
+ if (!netif_running(edev->ndev)) {
+ DP_NOTICE(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ qede_netif_stop(edev);
+
+ /* Bring up the link in Loopback mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = loopback_mode;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ /* prepare the loopback packet */
+ pkt_size = edev->ndev->mtu + ETH_HLEN;
+
+ skb = netdev_alloc_skb(edev->ndev, pkt_size);
+ if (!skb) {
+ DP_INFO(edev, "Can't allocate skb\n");
+ rc = -ENOMEM;
+ goto test_loopback_exit;
+ }
+ packet = skb_put(skb, pkt_size);
+ ether_addr_copy(packet, edev->ndev->dev_addr);
+ ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
+ memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ packet[i] = (unsigned char)(i & 0xff);
+
+ rc = qede_selftest_transmit_traffic(edev, skb);
+ if (rc)
+ goto test_loopback_exit;
+
+ rc = qede_selftest_receive_traffic(edev);
+ if (rc)
+ goto test_loopback_exit;
+
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
+
+test_loopback_exit:
+ dev_kfree_skb(skb);
+
+ /* Bring up the link in Normal mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ qede_netif_start(edev);
+
+ return rc;
+}
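/* [Editorial note] The test frame uses the device's own MAC as both source
 * and destination and fills the remaining header bytes with 0x77, so the
 * looped-back frame is non-IP and, per the comment in
 * qede_selftest_receive_traffic(), arrives on default rx-queue 0.
 */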
+
+static void qede_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
+
+ memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ if (qede_selftest_run_loopback(edev,
+ QED_LINK_LOOPBACK_INT_PHY)) {
+ buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+
+ if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
+ buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
+ buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
+ buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
+ buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
.get_settings = qede_get_settings,
.set_settings = qede_set_settings,
@@ -584,13 +1144,47 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_strings = qede_get_strings,
.set_phys_id = qede_set_phys_id,
.get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
.get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
+ .get_channels = qede_get_channels,
+ .set_channels = qede_set_channels,
+ .self_test = qede_self_test,
+};
+static const struct ethtool_ops qede_vf_ethtool_ops = {
+ .get_settings = qede_get_settings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_ringparam = qede_get_ringparam,
+ .set_ringparam = qede_set_ringparam,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
+ .get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
};
void qede_set_ethtool_ops(struct net_device *dev)
{
- dev->ethtool_ops = &qede_ethtool_ops;
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (IS_VF(edev))
+ dev->ethtool_ops = &qede_vf_ethtool_ops;
+ else
+ dev->ethtool_ops = &qede_ethtool_ops;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 12f661579..f8e11f953 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,7 +24,12 @@
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
+#ifdef CONFIG_QEDE_VXLAN
#include <net/vxlan.h>
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+#include <net/geneve.h>
+#endif
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -58,6 +63,7 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_100 0x1644
#define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@@ -66,15 +72,24 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
#endif
+enum qede_pci_private {
+ QEDE_PRIVATE_PF,
+ QEDE_PRIVATE_VF
+};
+
static const struct pci_device_id qede_pci_tbl[] = {
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
@@ -89,17 +104,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);
+#ifdef CONFIG_QED_SRIOV
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (vlan > 4095) {
+ DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
+ vlan, vf);
+
+ return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
+}
+
+static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+
+ if (!is_valid_ether_addr(mac)) {
+ DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
+ return -EINVAL;
+ }
+
+ return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
+}
+
+static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+{
+ struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
+ int rc;
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
+
+ rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
+
+ /* Enable/Disable Tx switching for PF */
+ if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+ qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+ struct qed_update_vport_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = num_vfs_param ? 1 : 0;
+ edev->ops->vport_update(edev->cdev, &params);
+ }
+
+ return rc;
+}
+#endif
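/* [Editorial note] qede_sriov_configure() enables hardware Tx switching
 * whenever at least one VF exists (num_vfs_param != 0), so PF<->VF traffic
 * is switched on the device, and turns it back off when the last VF is
 * removed; NPAR mode and devices without the tx_switching capability skip
 * the toggle.
 */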
+
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
.probe = qede_probe,
.remove = qede_remove,
+#ifdef CONFIG_QED_SRIOV
+ .sriov_configure = qede_sriov_configure,
+#endif
};
+static void qede_force_mac(void *dev, u8 *mac)
+{
+ struct qede_dev *edev = dev;
+
+ ether_addr_copy(edev->ndev->dev_addr, mac);
+ ether_addr_copy(edev->primary_mac, mac);
+}
+
static struct qed_eth_cb_ops qede_ll_ops = {
{
.link_update = qede_link_update,
},
+ .force_mac = qede_force_mac,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -141,19 +226,10 @@ static
int __init qede_init(void)
{
int ret;
- u32 qed_ver;
pr_notice("qede_init: %s\n", version);
- qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
- if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
- pr_notice("Version mismatch [%08x != %08x]\n",
- qed_ver,
- QEDE_ETH_INTERFACE_VERSION);
- return -EINVAL;
- }
-
- qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+ qed_ops = qed_get_eth_ops();
if (!qed_ops) {
pr_notice("Failed to get qed ethtool operations\n");
return -EINVAL;
@@ -319,6 +395,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
*ipv6_ext = 1;
+ if (skb->encapsulation)
+ rc |= XMIT_ENC;
+
if (skb_is_gso(skb))
rc |= XMIT_LSO;
@@ -380,6 +459,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
return 0;
}
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+ if (is_encap_pkt)
+ return (skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data);
+ else
+ return (skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data);
+}
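/* [Editorial note] For LSO the first BDs must cover exactly the headers, so
 * hlen is "end of (inner) transport header minus start of packet". E.g. a
 * plain IPv4/TCP frame gives 14 (Ethernet) + 20 (IPv4) + 20 (TCP, no
 * options) = 54 bytes; for an encapsulated frame the inner transport header
 * is used, so the tunnel encapsulation is counted as header too.
 */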
+
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -390,8 +479,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
if (xmit_type & XMIT_LSO) {
int hlen;
- hlen = skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data;
+ hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
/* linear payload would require its own BD */
if (skb_headlen(skb) > hlen)
@@ -499,7 +587,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
- first_bd->data.bitfields |= cpu_to_le16(temp);
+ if (xmit_type & XMIT_ENC) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ } else {
+ /* In cases where the OS doesn't request inner offloads
+ * for a tunnelled packet, we need to override the HW
+ * tunnel configuration so that the packet is treated as
+ * a regular, non-tunnelled packet and no inner offloads
+ * are done by the hardware.
+ */
+ first_bd->data.bitfields |= cpu_to_le16(temp);
+ }
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass a few params, since the device cracker doesn't
@@ -515,10 +614,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
third_bd->data.lso_mss =
cpu_to_le16(skb_shinfo(skb)->gso_size);
- first_bd->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- hlen = skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data;
+ if (unlikely(xmit_type & XMIT_ENC)) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, true);
+ } else {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, false);
+ }
/* @@@TBD - if this will not be removed, need to check */
third_bd->data.bitfields |=
@@ -644,7 +748,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static int qede_txq_has_work(struct qede_tx_queue *txq)
+int qede_txq_has_work(struct qede_tx_queue *txq)
{
u16 hw_bd_cons;
@@ -727,7 +831,7 @@ static int qede_tx_int(struct qede_dev *edev,
return 0;
}
-static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
u16 hw_comp_cons, sw_comp_cons;
@@ -782,8 +886,8 @@ static inline void qede_reuse_page(struct qede_dev *edev,
/* In case of allocation failures reuse buffers
* from consumer index to produce buffers for firmware
*/
-static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
- struct qede_dev *edev, u8 count)
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *edev, u8 count)
{
struct sw_rx_data *curr_cons;
@@ -818,7 +922,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
* network stack to take the ownership of the page
* which can be recycled multiple times by the driver.
*/
- atomic_inc(&curr_cons->data->_count);
+ page_ref_inc(curr_cons->data);
qede_reuse_page(edev, rxq, curr_cons);
}
@@ -879,6 +983,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
if (csum_flag & QEDE_CSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+ skb->csum_level = 1;
}
static inline void qede_skb_receive(struct qede_dev *edev,
@@ -931,7 +1038,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
/* Incr page ref count to reuse on allocation failure
* so that it doesn't get freed while freeing SKB.
*/
- atomic_inc(&current_bd->data->_count);
+ page_ref_inc(current_bd->data);
goto out;
}
@@ -971,8 +1078,7 @@ static void qede_tpa_start(struct qede_dev *edev,
* start until it's over and we don't want to risk allocation failing
* here, so re-allocate once aggregation is over.
*/
- dma_unmap_addr_set(sw_rx_data_prod, mapping,
- dma_unmap_addr(replace_buf, mapping));
+ sw_rx_data_prod->mapping = replace_buf->mapping;
sw_rx_data_prod->data = replace_buf->data;
rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
@@ -1188,13 +1294,47 @@ err:
tpa_info->skb = NULL;
}
-static u8 qede_check_csum(u16 flag)
+static bool qede_tunn_exist(u16 flag)
+{
+ return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
+static u8 qede_check_tunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 tcsum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
{
u16 csum_flag = 0;
u8 csum = 0;
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
csum = QEDE_CSUM_UNNECESSARY;
@@ -1209,6 +1349,14 @@ static u8 qede_check_csum(u16 flag)
return csum;
}
+static u8 qede_check_csum(u16 flag)
+{
+ if (!qede_tunn_exist(flag))
+ return qede_check_notunn_csum(flag);
+ else
+ return qede_check_tunn_csum(flag);
+}
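/* [Editorial note] For tunnelled frames the split above can report two
 * verified checksums: QEDE_CSUM_UNNECESSARY for the frame itself plus
 * QEDE_TUNN_CSUM_UNNECESSARY for the inner L4 checksum, which
 * qede_set_skb_csum() translates into skb->csum_level = 1 (one additional
 * verified checksum level).
 */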
+
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_dev *edev = fp->edev;
@@ -1340,7 +1488,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
* freeing SKB.
*/
- atomic_inc(&sw_rx_data->data->_count);
+ page_ref_inc(sw_rx_data->data);
rxq->rx_alloc_errors++;
qede_recycle_rx_bd_ring(rxq, edev,
fp_cqe->bd_num);
@@ -1569,16 +1717,25 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
- edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
- edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
- edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
- edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
- edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
- edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
- edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
- edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
- edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
- edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+ edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
+ edev->stats.rx_128_to_255_byte_packets =
+ stats.rx_128_to_255_byte_packets;
+ edev->stats.rx_256_to_511_byte_packets =
+ stats.rx_256_to_511_byte_packets;
+ edev->stats.rx_512_to_1023_byte_packets =
+ stats.rx_512_to_1023_byte_packets;
+ edev->stats.rx_1024_to_1518_byte_packets =
+ stats.rx_1024_to_1518_byte_packets;
+ edev->stats.rx_1519_to_1522_byte_packets =
+ stats.rx_1519_to_1522_byte_packets;
+ edev->stats.rx_1519_to_2047_byte_packets =
+ stats.rx_1519_to_2047_byte_packets;
+ edev->stats.rx_2048_to_4095_byte_packets =
+ stats.rx_2048_to_4095_byte_packets;
+ edev->stats.rx_4096_to_9216_byte_packets =
+ stats.rx_4096_to_9216_byte_packets;
+ edev->stats.rx_9217_to_16383_byte_packets =
+ stats.rx_9217_to_16383_byte_packets;
edev->stats.rx_crc_errors = stats.rx_crc_errors;
edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
edev->stats.rx_pause_frames = stats.rx_pause_frames;
@@ -1652,6 +1809,49 @@ static struct rtnl_link_stats64 *qede_get_stats64(
return stats;
}
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
+}
+
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
+ max_tx_rate);
+}
+
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
+}
+
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+ int link_state)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
+}
+#endif
+
static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
struct qed_update_vport_params params;
@@ -1893,6 +2093,99 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ netdev_features_t changes = features ^ dev->features;
+ bool need_reload = false;
+
+ /* No action needed if hardware GRO is disabled during driver load */
+ if (changes & NETIF_F_GRO) {
+ if (dev->features & NETIF_F_GRO)
+ need_reload = !edev->gro_disable;
+ else
+ need_reload = edev->gro_disable;
+ }
+
+ if (need_reload && netif_running(edev->ndev)) {
+ dev->features = features;
+ qede_reload(edev, NULL, NULL);
+ return 1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_QEDE_VXLAN
+static void qede_add_vxlan_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = t_port;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d", t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_vxlan_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (t_port != edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d", t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
+
+#ifdef CONFIG_QEDE_GENEVE
+static void qede_add_geneve_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = t_port;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d", t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_del_geneve_port(struct net_device *dev,
+ sa_family_t sa_family, __be16 port)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(port);
+
+ if (t_port != edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d", t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+#endif
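/* [Editorial note] Both tunnel-port callbacks above only record the port and
 * set an sp_flags bit; the actual qed_ops->tunn_config() call is deferred to
 * qede_sp_task() (further down in this patch), which runs in process context
 * under qede_lock.
 */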
+
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -1901,9 +2194,28 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_mac = qede_set_vf_mac,
+ .ndo_set_vf_vlan = qede_set_vf_vlan,
+#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_link_state = qede_set_vf_link_state,
+ .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+ .ndo_get_vf_config = qede_get_vf_config,
+ .ndo_set_vf_rate = qede_set_vf_rate,
+#endif
+#ifdef CONFIG_QEDE_VXLAN
+ .ndo_add_vxlan_port = qede_add_vxlan_port,
+ .ndo_del_vxlan_port = qede_del_vxlan_port,
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+ .ndo_add_geneve_port = qede_add_geneve_port,
+ .ndo_del_geneve_port = qede_del_geneve_port,
+#endif
};
/* -------------------------------------------------------------------------
@@ -1974,6 +2286,14 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
+ /* Encap features */
+ hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO_ECN;
+ ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2074,6 +2394,8 @@ static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+ struct qed_dev *cdev = edev->cdev;
+
mutex_lock(&edev->qede_lock);
if (edev->state == QEDE_STATE_OPEN) {
@@ -2081,6 +2403,24 @@ static void qede_sp_task(struct work_struct *work)
qede_config_rx_mode(edev->ndev);
}
+ if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+ struct qed_tunn_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = edev->vxlan_dst_port;
+ qed_ops->tunn_config(cdev, &tunn_params);
+ }
+
+ if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+ struct qed_tunn_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = edev->geneve_dst_port;
+ qed_ops->tunn_config(cdev, &tunn_params);
+ }
+
mutex_unlock(&edev->qede_lock);
}
@@ -2099,8 +2439,9 @@ enum qede_probe_mode {
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
- enum qede_probe_mode mode)
+ bool is_vf, enum qede_probe_mode mode)
{
+ struct qed_probe_params probe_params;
struct qed_slowpath_params params;
struct qed_dev_eth_info dev_info;
struct qede_dev *edev;
@@ -2110,8 +2451,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (unlikely(dp_level & QED_LEVEL_INFO))
pr_notice("Starting qede probe\n");
- cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
- dp_module, dp_level);
+ memset(&probe_params, 0, sizeof(probe_params));
+ probe_params.protocol = QED_PROTOCOL_ETH;
+ probe_params.dp_module = dp_module;
+ probe_params.dp_level = dp_level;
+ probe_params.is_vf = is_vf;
+ cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) {
rc = -ENODEV;
goto err0;
@@ -2145,6 +2490,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
goto err2;
}
+ if (is_vf)
+ edev->flags |= QEDE_FLAG_IS_VF;
+
qede_init_ndev(edev);
rc = register_netdev(edev->ndev);
@@ -2176,12 +2524,24 @@ err0:
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ bool is_vf = false;
u32 dp_module = 0;
u8 dp_level = 0;
+ switch ((enum qede_pci_private)id->driver_data) {
+ case QEDE_PRIVATE_VF:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a VF\n");
+ is_vf = true;
+ break;
+ default:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a PF\n");
+ }
+
qede_config_debug(debug, &dp_module, &dp_level);
- return __qede_probe(pdev, dp_module, dp_level,
+ return __qede_probe(pdev, dp_module, dp_level, is_vf,
QEDE_PROBE_NORMAL);
}
@@ -2320,7 +2680,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
if (replace_buf->data) {
dma_unmap_page(&edev->pdev->dev,
- dma_unmap_addr(replace_buf, mapping),
+ replace_buf->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(replace_buf->data);
}
@@ -2420,7 +2780,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
goto err;
}
- dma_unmap_addr_set(replace_buf, mapping, mapping);
+ replace_buf->mapping = mapping;
tpa_info->replace_buf.page_offset = 0;
tpa_info->replace_buf_mapping = mapping;
@@ -2871,15 +3231,16 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc;
}
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
int rc, tc, i;
int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
- struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
struct qed_update_vport_params vport_update_params;
struct qed_queue_start_common_params q_params;
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
struct qed_start_vport_params start = {0};
+ bool reset_rss_indir = false;
if (!edev->num_rss) {
DP_ERR(edev,
@@ -2971,19 +3332,59 @@ static int qede_start_queues(struct qede_dev *edev)
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
+ if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+ qed_info->tx_switching) {
+ vport_update_params.update_tx_switching_flg = 1;
+ vport_update_params.tx_switching_flg = 1;
+ }
+
/* Fill struct with RSS params */
if (QEDE_RSS_CNT(edev) > 1) {
vport_update_params.update_rss_flg = 1;
- for (i = 0; i < 128; i++)
- rss_params->rss_ind_table[i] =
- ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
- netdev_rss_key_fill(rss_params->rss_key,
- sizeof(rss_params->rss_key));
+
+ /* Need to validate current RSS config uses valid entries */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ if (edev->rss_params.rss_ind_table[i] >=
+ edev->num_rss) {
+ reset_rss_indir = true;
+ break;
+ }
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
+ reset_rss_indir) {
+ u16 val;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ u16 indir_val;
+
+ val = QEDE_RSS_CNT(edev);
+ indir_val = ethtool_rxfh_indir_default(i, val);
+ edev->rss_params.rss_ind_table[i] = indir_val;
+ }
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+ netdev_rss_key_fill(edev->rss_params.rss_key,
+ sizeof(edev->rss_params.rss_key));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+ edev->rss_params.rss_caps = QED_RSS_IPV4 |
+ QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP |
+ QED_RSS_IPV6_TCP;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+ }
+
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
} else {
- memset(rss_params, 0, sizeof(*rss_params));
+ memset(&vport_update_params.rss_params, 0,
+ sizeof(vport_update_params.rss_params));
}
- memcpy(&vport_update_params.rss_params, rss_params,
- sizeof(*rss_params));
rc = edev->ops->vport_update(cdev, &vport_update_params);
if (rc) {
@@ -3061,6 +3462,7 @@ out:
enum qede_load_mode {
QEDE_LOAD_NORMAL,
+ QEDE_LOAD_RELOAD,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3099,7 +3501,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
goto err3;
DP_INFO(edev, "Setup IRQs succeeded\n");
- rc = qede_start_queues(edev);
+ rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
if (rc)
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@@ -3154,7 +3556,7 @@ void qede_reload(struct qede_dev *edev,
if (func)
func(edev, args);
- qede_load(edev, QEDE_LOAD_NORMAL);
+ qede_load(edev, QEDE_LOAD_RELOAD);
mutex_lock(&edev->qede_lock);
qede_config_rx_mode(edev->ndev);
@@ -3165,12 +3567,24 @@ void qede_reload(struct qede_dev *edev,
static int qede_open(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
+ int rc;
netif_carrier_off(ndev);
edev->ops->common->set_power_state(edev->cdev, PCI_D0);
- return qede_load(edev, QEDE_LOAD_NORMAL);
+ rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_QEDE_VXLAN
+ vxlan_get_rx_port(ndev);
+#endif
+#ifdef CONFIG_QEDE_GENEVE
+ geneve_get_rx_port(ndev);
+#endif
+ return 0;
}
static int qede_close(struct net_device *ndev)
@@ -3221,6 +3635,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
return -EFAULT;
}
+ if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+ DP_NOTICE(edev, "qed prevents setting MAC\n");
+ return -EINVAL;
+ }
+
ether_addr_copy(ndev->dev_addr, addr->sa_data);
if (!netif_running(ndev)) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 7bd6f25b4..87c642d3b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -772,6 +772,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
+ /* Ensure writes are complete before HW fetches Tx descriptors */
+ wmb();
qlcnic_update_cmd_producer(tx_ring);
return NETDEV_TX_OK;
@@ -2220,7 +2222,7 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
if (!opcode)
return;
- ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
desc = &sds_ring->desc_head[consumer];
desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 392f193cd..9fedc8f62 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -3952,8 +3952,14 @@ static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
- return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
- PCI_ERS_RESULT_RECOVERED;
+ pci_ers_result_t res;
+
+ rtnl_lock();
+ res = qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
+ PCI_ERS_RESULT_RECOVERED;
+ rtnl_unlock();
+
+ return res;
}
static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b28e73ea2..fd5d1c93b 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4687,7 +4687,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
/*
* Set up the operating parameters.
*/
- qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
@@ -4846,7 +4846,6 @@ static void ql_eeh_close(struct net_device *ndev)
}
/* Disabling the timer */
- del_timer_sync(&qdev->timer);
ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4873,6 +4872,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
if (netif_running(ndev))
ql_eeh_close(ndev);
pci_disable_device(pdev);
@@ -4880,6 +4880,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
+ del_timer_sync(&qdev->timer);
ql_eeh_close(ndev);
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 1ef03939d..6e2add979 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -719,7 +719,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
qca->stats.ring_full++;
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (qca->spi_thread &&
qca->spi_thread->state != TASK_RUNNING)
@@ -734,7 +734,7 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
struct qcaspi *qca = netdev_priv(dev);
netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
- jiffies, jiffies - dev->trans_start);
+ jiffies, jiffies - dev_trans_start(dev));
qca->net_dev->stats.tx_errors++;
/* Trigger tx queue flush and QCA7000 reset */
qca->sync = QCASPI_SYNC_UNKNOWN;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index d77d60ea8..5cb96785f 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -544,7 +544,7 @@ static void tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
/* Try to restart the adapter. */
hardware_init(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
dev->stats.tx_errors++;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index e894dc7c9..e4030c3d7 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -345,7 +345,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
static int rx_buf_sz = 16383;
-static int use_dac;
+static int use_dac = -1;
static struct {
u32 msg_enable;
} debug = { -1 };
@@ -8206,20 +8206,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
- tp->cp_cmd = 0;
-
- if ((sizeof(dma_addr_t) > 4) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
- tp->cp_cmd |= PCIDAC;
- dev->features |= NETIF_F_HIGHDMA;
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc < 0) {
- netif_err(tp, probe, dev, "DMA configuration failed\n");
- goto err_out_free_res_3;
- }
- }
-
/* ioremap MMIO region */
ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
if (!ioaddr) {
@@ -8235,6 +8221,25 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+ tp->cp_cmd = 0;
+
+ if ((sizeof(dma_addr_t) > 4) &&
+ (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
+ tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+
+ /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
+ if (!pci_is_pcie(pdev))
+ tp->cp_cmd |= PCIDAC;
+ dev->features |= NETIF_F_HIGHDMA;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
+ goto err_out_unmap_4;
+ }
+ }
+
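/* [Editorial note] use_dac now defaults to -1, i.e. "auto": the 64-bit DMA
 * mask is picked automatically on PCIe chips from RTL_GIGA_MAC_VER_18 on,
 * while use_dac=1 still forces it and use_dac=0 keeps the 32-bit mask. The
 * block also moved below rtl8169_get_mac_version(), since the decision now
 * depends on the detected chip, and PCIDAC is only set for non-PCIe parts.
 */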
rtl_init_rxcfg(tp);
rtl_irq_disable(tp);
@@ -8394,12 +8399,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&tp->counters_phys_addr, GFP_KERNEL);
if (!tp->counters) {
rc = -ENOMEM;
- goto err_out_msi_4;
+ goto err_out_msi_5;
}
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_cnt_5;
+ goto err_out_cnt_6;
pci_set_drvdata(pdev, dev);
@@ -8433,12 +8438,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
return rc;
-err_out_cnt_5:
+err_out_cnt_6:
dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
tp->counters_phys_addr);
-err_out_msi_4:
+err_out_msi_5:
netif_napi_del(&tp->napi);
rtl_disable_msi(pdev, tp);
+err_out_unmap_4:
iounmap(ioaddr);
err_out_free_res_3:
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b2160d1b9..4e5d5e953 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -157,6 +157,7 @@ enum ravb_reg {
TIC = 0x0378,
TIS = 0x037C,
ISS = 0x0380,
+ CIE = 0x0384, /* R-Car Gen3 only */
GCCR = 0x0390,
GMTT = 0x0394,
GPTC = 0x0398,
@@ -170,6 +171,15 @@ enum ravb_reg {
GCT0 = 0x03B8,
GCT1 = 0x03BC,
GCT2 = 0x03C0,
+ GIE = 0x03CC, /* R-Car Gen3 only */
+ GID = 0x03D0, /* R-Car Gen3 only */
+ DIL = 0x0440, /* R-Car Gen3 only */
+ RIE0 = 0x0460, /* R-Car Gen3 only */
+ RID0 = 0x0464, /* R-Car Gen3 only */
+ RIE2 = 0x0470, /* R-Car Gen3 only */
+ RID2 = 0x0474, /* R-Car Gen3 only */
+ TIE = 0x0478, /* R-Car Gen3 only */
+ TID = 0x047c, /* R-Car Gen3 only */
/* E-MAC registers */
ECMR = 0x0500,
@@ -556,6 +566,16 @@ enum ISS_BIT {
ISS_DPS15 = 0x80000000,
};
+/* CIE (R-Car Gen3 only) */
+enum CIE_BIT {
+ CIE_CRIE = 0x00000001,
+ CIE_CTIE = 0x00000100,
+ CIE_RQFM = 0x00010000,
+ CIE_CL0M = 0x00020000,
+ CIE_RFWL = 0x00040000,
+ CIE_RFFL = 0x00080000,
+};
+
/* GCCR */
enum GCCR_BIT {
GCCR_TCR = 0x00000003,
@@ -592,6 +612,188 @@ enum GIS_BIT {
GIS_PTMF = 0x00000004,
};
+/* GIE (R-Car Gen3 only) */
+enum GIE_BIT {
+ GIE_PTCS = 0x00000001,
+ GIE_PTOS = 0x00000002,
+ GIE_PTMS0 = 0x00000004,
+ GIE_PTMS1 = 0x00000008,
+ GIE_PTMS2 = 0x00000010,
+ GIE_PTMS3 = 0x00000020,
+ GIE_PTMS4 = 0x00000040,
+ GIE_PTMS5 = 0x00000080,
+ GIE_PTMS6 = 0x00000100,
+ GIE_PTMS7 = 0x00000200,
+ GIE_ATCS0 = 0x00010000,
+ GIE_ATCS1 = 0x00020000,
+ GIE_ATCS2 = 0x00040000,
+ GIE_ATCS3 = 0x00080000,
+ GIE_ATCS4 = 0x00100000,
+ GIE_ATCS5 = 0x00200000,
+ GIE_ATCS6 = 0x00400000,
+ GIE_ATCS7 = 0x00800000,
+ GIE_ATCS8 = 0x01000000,
+ GIE_ATCS9 = 0x02000000,
+ GIE_ATCS10 = 0x04000000,
+ GIE_ATCS11 = 0x08000000,
+ GIE_ATCS12 = 0x10000000,
+ GIE_ATCS13 = 0x20000000,
+ GIE_ATCS14 = 0x40000000,
+ GIE_ATCS15 = 0x80000000,
+};
+
+/* GID (R-Car Gen3 only) */
+enum GID_BIT {
+ GID_PTCD = 0x00000001,
+ GID_PTOD = 0x00000002,
+ GID_PTMD0 = 0x00000004,
+ GID_PTMD1 = 0x00000008,
+ GID_PTMD2 = 0x00000010,
+ GID_PTMD3 = 0x00000020,
+ GID_PTMD4 = 0x00000040,
+ GID_PTMD5 = 0x00000080,
+ GID_PTMD6 = 0x00000100,
+ GID_PTMD7 = 0x00000200,
+ GID_ATCD0 = 0x00010000,
+ GID_ATCD1 = 0x00020000,
+ GID_ATCD2 = 0x00040000,
+ GID_ATCD3 = 0x00080000,
+ GID_ATCD4 = 0x00100000,
+ GID_ATCD5 = 0x00200000,
+ GID_ATCD6 = 0x00400000,
+ GID_ATCD7 = 0x00800000,
+ GID_ATCD8 = 0x01000000,
+ GID_ATCD9 = 0x02000000,
+ GID_ATCD10 = 0x04000000,
+ GID_ATCD11 = 0x08000000,
+ GID_ATCD12 = 0x10000000,
+ GID_ATCD13 = 0x20000000,
+ GID_ATCD14 = 0x40000000,
+ GID_ATCD15 = 0x80000000,
+};
+
+/* RIE0 (R-Car Gen3 only) */
+enum RIE0_BIT {
+ RIE0_FRS0 = 0x00000001,
+ RIE0_FRS1 = 0x00000002,
+ RIE0_FRS2 = 0x00000004,
+ RIE0_FRS3 = 0x00000008,
+ RIE0_FRS4 = 0x00000010,
+ RIE0_FRS5 = 0x00000020,
+ RIE0_FRS6 = 0x00000040,
+ RIE0_FRS7 = 0x00000080,
+ RIE0_FRS8 = 0x00000100,
+ RIE0_FRS9 = 0x00000200,
+ RIE0_FRS10 = 0x00000400,
+ RIE0_FRS11 = 0x00000800,
+ RIE0_FRS12 = 0x00001000,
+ RIE0_FRS13 = 0x00002000,
+ RIE0_FRS14 = 0x00004000,
+ RIE0_FRS15 = 0x00008000,
+ RIE0_FRS16 = 0x00010000,
+ RIE0_FRS17 = 0x00020000,
+};
+
+/* RID0 (R-Car Gen3 only) */
+enum RID0_BIT {
+ RID0_FRD0 = 0x00000001,
+ RID0_FRD1 = 0x00000002,
+ RID0_FRD2 = 0x00000004,
+ RID0_FRD3 = 0x00000008,
+ RID0_FRD4 = 0x00000010,
+ RID0_FRD5 = 0x00000020,
+ RID0_FRD6 = 0x00000040,
+ RID0_FRD7 = 0x00000080,
+ RID0_FRD8 = 0x00000100,
+ RID0_FRD9 = 0x00000200,
+ RID0_FRD10 = 0x00000400,
+ RID0_FRD11 = 0x00000800,
+ RID0_FRD12 = 0x00001000,
+ RID0_FRD13 = 0x00002000,
+ RID0_FRD14 = 0x00004000,
+ RID0_FRD15 = 0x00008000,
+ RID0_FRD16 = 0x00010000,
+ RID0_FRD17 = 0x00020000,
+};
+
+/* RIE2 (R-Car Gen3 only) */
+enum RIE2_BIT {
+ RIE2_QFS0 = 0x00000001,
+ RIE2_QFS1 = 0x00000002,
+ RIE2_QFS2 = 0x00000004,
+ RIE2_QFS3 = 0x00000008,
+ RIE2_QFS4 = 0x00000010,
+ RIE2_QFS5 = 0x00000020,
+ RIE2_QFS6 = 0x00000040,
+ RIE2_QFS7 = 0x00000080,
+ RIE2_QFS8 = 0x00000100,
+ RIE2_QFS9 = 0x00000200,
+ RIE2_QFS10 = 0x00000400,
+ RIE2_QFS11 = 0x00000800,
+ RIE2_QFS12 = 0x00001000,
+ RIE2_QFS13 = 0x00002000,
+ RIE2_QFS14 = 0x00004000,
+ RIE2_QFS15 = 0x00008000,
+ RIE2_QFS16 = 0x00010000,
+ RIE2_QFS17 = 0x00020000,
+ RIE2_RFFS = 0x80000000,
+};
+
+/* RID2 (R-Car Gen3 only) */
+enum RID2_BIT {
+ RID2_QFD0 = 0x00000001,
+ RID2_QFD1 = 0x00000002,
+ RID2_QFD2 = 0x00000004,
+ RID2_QFD3 = 0x00000008,
+ RID2_QFD4 = 0x00000010,
+ RID2_QFD5 = 0x00000020,
+ RID2_QFD6 = 0x00000040,
+ RID2_QFD7 = 0x00000080,
+ RID2_QFD8 = 0x00000100,
+ RID2_QFD9 = 0x00000200,
+ RID2_QFD10 = 0x00000400,
+ RID2_QFD11 = 0x00000800,
+ RID2_QFD12 = 0x00001000,
+ RID2_QFD13 = 0x00002000,
+ RID2_QFD14 = 0x00004000,
+ RID2_QFD15 = 0x00008000,
+ RID2_QFD16 = 0x00010000,
+ RID2_QFD17 = 0x00020000,
+ RID2_RFFD = 0x80000000,
+};
+
+/* TIE (R-Car Gen3 only) */
+enum TIE_BIT {
+ TIE_FTS0 = 0x00000001,
+ TIE_FTS1 = 0x00000002,
+ TIE_FTS2 = 0x00000004,
+ TIE_FTS3 = 0x00000008,
+ TIE_TFUS = 0x00000100,
+ TIE_TFWS = 0x00000200,
+ TIE_MFUS = 0x00000400,
+ TIE_MFWS = 0x00000800,
+ TIE_TDPS0 = 0x00010000,
+ TIE_TDPS1 = 0x00020000,
+ TIE_TDPS2 = 0x00040000,
+ TIE_TDPS3 = 0x00080000,
+};
+
+/* TID (R-Car Gen3 only) */
+enum TID_BIT {
+ TID_FTD0 = 0x00000001,
+ TID_FTD1 = 0x00000002,
+ TID_FTD2 = 0x00000004,
+ TID_FTD3 = 0x00000008,
+ TID_TFUD = 0x00000100,
+ TID_TFWD = 0x00000200,
+ TID_MFUD = 0x00000400,
+ TID_MFWD = 0x00000800,
+ TID_TDPD0 = 0x00010000,
+ TID_TDPD1 = 0x00020000,
+ TID_TDPD2 = 0x00040000,
+ TID_TDPD3 = 0x00080000,
+};
+
/* ECMR */
enum ECMR_BIT {
ECMR_PRM = 0x00000001,
@@ -817,6 +1019,8 @@ struct ravb_private {
int duplex;
int emac_irq;
enum ravb_chip_id chip_id;
+ int rx_irqs[NUM_RX_QUEUE];
+ int tx_irqs[NUM_TX_QUEUE];
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
@@ -841,7 +1045,7 @@ void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set);
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
-irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
+void ravb_ptp_interrupt(struct net_device *ndev);
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev);
void ravb_ptp_stop(struct net_device *ndev);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4277d0c12..867caf6e7 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -42,6 +42,16 @@
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
+static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
+ "ch0", /* RAVB_BE */
+ "ch1", /* RAVB_NC */
+};
+
+static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
+ "ch18", /* RAVB_BE */
+ "ch19", /* RAVB_NC */
+};
+
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set)
{
@@ -236,10 +246,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
- /* The size of the buffer should be on 16-byte boundary. */
- rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+ rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- ALIGN(PKT_BUF_SZ, 16),
+ PKT_BUF_SZ,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -365,6 +374,7 @@ static void ravb_emac_init(struct net_device *ndev)
/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
int error;
/* Set CONFIG mode */
@@ -401,6 +411,12 @@ static int ravb_dmac_init(struct net_device *ndev)
ravb_write(ndev, TCCR_TFEN, TCCR);
/* Interrupt init: */
+ if (priv->chip_id == RCAR_GEN3) {
+ /* Clear DIL.DPLx */
+ ravb_write(ndev, 0, DIL);
+ /* Set queue specific interrupt */
+ ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
+ }
/* Frame receive */
ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
/* Disable FIFO full warning */
@@ -541,7 +557,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(PKT_BUF_SZ, 16),
+ PKT_BUF_SZ,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -571,8 +587,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q][entry];
- /* The size of the buffer should be on 16-byte boundary. */
- desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+ desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
skb = netdev_alloc_skb(ndev,
@@ -643,7 +658,7 @@ static int ravb_stop_dma(struct net_device *ndev)
}
/* E-MAC interrupt handler */
-static void ravb_emac_interrupt(struct net_device *ndev)
+static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
u32 ecsr, psr;
@@ -669,6 +684,18 @@ static void ravb_emac_interrupt(struct net_device *ndev)
}
}
+static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ spin_lock(&priv->lock);
+ ravb_emac_interrupt_unlocked(ndev);
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+}
+
/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
@@ -695,6 +722,50 @@ static void ravb_error_interrupt(struct net_device *ndev)
}
}
+static bool ravb_queue_interrupt(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ris0 = ravb_read(ndev, RIS0);
+ u32 ric0 = ravb_read(ndev, RIC0);
+ u32 tis = ravb_read(ndev, TIS);
+ u32 tic = ravb_read(ndev, TIC);
+
+ if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
+ if (napi_schedule_prep(&priv->napi[q])) {
+ /* Mask RX and TX interrupts */
+ if (priv->chip_id == RCAR_GEN2) {
+ ravb_write(ndev, ric0 & ~BIT(q), RIC0);
+ ravb_write(ndev, tic & ~BIT(q), TIC);
+ } else {
+ ravb_write(ndev, BIT(q), RID0);
+ ravb_write(ndev, BIT(q), TID);
+ }
+ __napi_schedule(&priv->napi[q]);
+ } else {
+ netdev_warn(ndev,
+ "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
+ ris0, ric0);
+ netdev_warn(ndev,
+ " tx status 0x%08x, tx mask 0x%08x.\n",
+ tis, tic);
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool ravb_timestamp_interrupt(struct net_device *ndev)
+{
+ u32 tis = ravb_read(ndev, TIS);
+
+ if (tis & TIS_TFUF) {
+ ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_get_tx_tstamp(ndev);
+ return true;
+ }
+ return false;
+}
+
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
@@ -708,63 +779,102 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
/* Received and transmitted interrupts */
if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
- u32 ris0 = ravb_read(ndev, RIS0);
- u32 ric0 = ravb_read(ndev, RIC0);
- u32 tis = ravb_read(ndev, TIS);
- u32 tic = ravb_read(ndev, TIC);
int q;
/* Timestamp updated */
- if (tis & TIS_TFUF) {
- ravb_write(ndev, ~TIS_TFUF, TIS);
- ravb_get_tx_tstamp(ndev);
+ if (ravb_timestamp_interrupt(ndev))
result = IRQ_HANDLED;
- }
/* Network control and best effort queue RX/TX */
for (q = RAVB_NC; q >= RAVB_BE; q--) {
- if (((ris0 & ric0) & BIT(q)) ||
- ((tis & tic) & BIT(q))) {
- if (napi_schedule_prep(&priv->napi[q])) {
- /* Mask RX and TX interrupts */
- ric0 &= ~BIT(q);
- tic &= ~BIT(q);
- ravb_write(ndev, ric0, RIC0);
- ravb_write(ndev, tic, TIC);
- __napi_schedule(&priv->napi[q]);
- } else {
- netdev_warn(ndev,
- "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
- ris0, ric0);
- netdev_warn(ndev,
- " tx status 0x%08x, tx mask 0x%08x.\n",
- tis, tic);
- }
+ if (ravb_queue_interrupt(ndev, q))
result = IRQ_HANDLED;
- }
}
}
/* E-MAC status summary */
if (iss & ISS_MS) {
- ravb_emac_interrupt(ndev);
+ ravb_emac_interrupt_unlocked(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Error status summary */
+ if (iss & ISS_ES) {
+ ravb_error_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* gPTP interrupt status summary */
+ if (iss & ISS_CGIS) {
+ ravb_ptp_interrupt(ndev);
result = IRQ_HANDLED;
}
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return result;
+}
+
+/* Timestamp/Error/gPTP interrupt handler */
+static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+ irqreturn_t result = IRQ_NONE;
+ u32 iss;
+
+ spin_lock(&priv->lock);
+ /* Get interrupt status */
+ iss = ravb_read(ndev, ISS);
+
+ /* Timestamp updated */
+ if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
+ result = IRQ_HANDLED;
+
/* Error status summary */
if (iss & ISS_ES) {
ravb_error_interrupt(ndev);
result = IRQ_HANDLED;
}
- if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED)
+ /* gPTP interrupt status summary */
+ if (iss & ISS_CGIS) {
+ ravb_ptp_interrupt(ndev);
result = IRQ_HANDLED;
+ }
mmiowb();
spin_unlock(&priv->lock);
return result;
}
+static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ /* Network control/Best effort queue RX/TX */
+ if (ravb_queue_interrupt(ndev, q))
+ result = IRQ_HANDLED;
+
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return result;
+}
+
+static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
+{
+ return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
+}
+
+static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
+{
+ return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
+}
+
static int ravb_poll(struct napi_struct *napi, int budget)
{
struct net_device *ndev = napi->dev;
@@ -804,8 +914,13 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Re-enable RX/TX interrupts */
spin_lock_irqsave(&priv->lock, flags);
- ravb_modify(ndev, RIC0, mask, mask);
- ravb_modify(ndev, TIC, mask, mask);
+ if (priv->chip_id == RCAR_GEN2) {
+ ravb_modify(ndev, RIC0, mask, mask);
+ ravb_modify(ndev, TIC, mask, mask);
+ } else {
+ ravb_write(ndev, mask, RIE0);
+ ravb_write(ndev, mask, TIE);
+ }
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1208,35 +1323,72 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.get_ts_info = ravb_get_ts_info,
};
+static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
+ struct net_device *ndev, struct device *dev,
+ const char *ch)
+{
+ char *name;
+ int error;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
+ if (!name)
+ return -ENOMEM;
+ error = request_irq(irq, handler, 0, name, ndev);
+ if (error)
+ netdev_err(ndev, "cannot request IRQ %s\n", name);
+
+ return error;
+}
+
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
int error;
napi_enable(&priv->napi[RAVB_BE]);
napi_enable(&priv->napi[RAVB_NC]);
- error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
- ndev);
- if (error) {
- netdev_err(ndev, "cannot request IRQ\n");
- goto out_napi_off;
- }
-
- if (priv->chip_id == RCAR_GEN3) {
- error = request_irq(priv->emac_irq, ravb_interrupt,
- IRQF_SHARED, ndev->name, ndev);
+ if (priv->chip_id == RCAR_GEN2) {
+ error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
+ ndev->name, ndev);
if (error) {
netdev_err(ndev, "cannot request IRQ\n");
- goto out_free_irq;
+ goto out_napi_off;
}
+ } else {
+ error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
+ dev, "ch22:multi");
+ if (error)
+ goto out_napi_off;
+ error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
+ dev, "ch24:emac");
+ if (error)
+ goto out_free_irq;
+ error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
+ ndev, dev, "ch0:rx_be");
+ if (error)
+ goto out_free_irq_emac;
+ error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
+ ndev, dev, "ch18:tx_be");
+ if (error)
+ goto out_free_irq_be_rx;
+ error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
+ ndev, dev, "ch1:rx_nc");
+ if (error)
+ goto out_free_irq_be_tx;
+ error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
+ ndev, dev, "ch19:tx_nc");
+ if (error)
+ goto out_free_irq_nc_rx;
}
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq2;
+ goto out_free_irq_nc_tx;
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
@@ -1256,9 +1408,18 @@ out_ptp_stop:
/* Stop PTP Clock driver */
if (priv->chip_id == RCAR_GEN2)
ravb_ptp_stop(ndev);
-out_free_irq2:
- if (priv->chip_id == RCAR_GEN3)
- free_irq(priv->emac_irq, ndev);
+out_free_irq_nc_tx:
+ if (priv->chip_id == RCAR_GEN2)
+ goto out_free_irq;
+ free_irq(priv->tx_irqs[RAVB_NC], ndev);
+out_free_irq_nc_rx:
+ free_irq(priv->rx_irqs[RAVB_NC], ndev);
+out_free_irq_be_tx:
+ free_irq(priv->tx_irqs[RAVB_BE], ndev);
+out_free_irq_be_rx:
+ free_irq(priv->rx_irqs[RAVB_BE], ndev);
+out_free_irq_emac:
+ free_irq(priv->emac_irq, ndev);
out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
@@ -1506,8 +1667,13 @@ static int ravb_close(struct net_device *ndev)
priv->phydev = NULL;
}
- if (priv->chip_id == RCAR_GEN3)
+ if (priv->chip_id != RCAR_GEN2) {
+ free_irq(priv->tx_irqs[RAVB_NC], ndev);
+ free_irq(priv->rx_irqs[RAVB_NC], ndev);
+ free_irq(priv->tx_irqs[RAVB_BE], ndev);
+ free_irq(priv->rx_irqs[RAVB_BE], ndev);
free_irq(priv->emac_irq, ndev);
+ }
free_irq(ndev->irq, ndev);
napi_disable(&priv->napi[RAVB_NC]);
@@ -1718,6 +1884,7 @@ static int ravb_probe(struct platform_device *pdev)
struct net_device *ndev;
int error, irq, q;
struct resource *res;
+ int i;
if (!np) {
dev_err(&pdev->dev,
@@ -1787,6 +1954,22 @@ static int ravb_probe(struct platform_device *pdev)
goto out_release;
}
priv->emac_irq = irq;
+ for (i = 0; i < NUM_RX_QUEUE; i++) {
+ irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->rx_irqs[i] = irq;
+ }
+ for (i = 0; i < NUM_TX_QUEUE; i++) {
+ irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->tx_irqs[i] = irq;
+ }
}
priv->chip_id = chip_id;
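
The Gen3 probe path above resolves each per-queue interrupt by the names in ravb_rx_irqs[]/ravb_tx_irqs[], and ravb_hook_irq() labels it with devm_kasprintf() so /proc/interrupts shows entries such as "eth0:ch0:rx_be". A condensed sketch of the same lookup-and-request pattern, assuming the usual <linux/platform_device.h> and <linux/interrupt.h> includes ("ch0" must match an "interrupt-names" entry in the device tree; the function name is illustrative):

static int example_hook_named_irq(struct platform_device *pdev,
				  struct net_device *ndev,
				  irq_handler_t handler)
{
	char *name;
	int irq, error;

	irq = platform_get_irq_byname(pdev, "ch0");
	if (irq < 0)
		return irq;

	/* Readable label for /proc/interrupts, freed with the device */
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s:%s",
			      ndev->name, "ch0:rx_be");
	if (!name)
		return -ENOMEM;

	error = request_irq(irq, handler, 0, name, ndev);
	if (error)
		netdev_err(ndev, "cannot request IRQ %s\n", name);
	return error;
}
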
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 57992ccc4..eede70ec3 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -194,7 +194,12 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
priv->ptp.extts[req->index] = on;
spin_lock_irqsave(&priv->lock, flags);
- ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
+ else if (on)
+ ravb_write(ndev, GIE_PTCS, GIE);
+ else
+ ravb_write(ndev, GID_PTCD, GID);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -241,7 +246,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
error = ravb_ptp_update_compare(priv, (u32)start_ns);
if (!error) {
/* Unmask interrupt */
- ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
+ else
+ ravb_write(ndev, GIE_PTMS0, GIE);
}
} else {
spin_lock_irqsave(&priv->lock, flags);
@@ -250,7 +258,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
perout->period = 0;
/* Mask interrupt */
- ravb_modify(ndev, GIC, GIC_PTME, 0);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_modify(ndev, GIC, GIC_PTME, 0);
+ else
+ ravb_write(ndev, GID_PTMD0, GID);
}
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -285,7 +296,7 @@ static const struct ptp_clock_info ravb_ptp_info = {
};
/* Caller must hold the lock */
-irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
+void ravb_ptp_interrupt(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
u32 gis = ravb_read(ndev, GIS);
@@ -308,12 +319,7 @@ irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
}
}
- if (gis) {
- ravb_write(ndev, ~gis, GIS);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
+ ravb_write(ndev, ~gis, GIS);
}
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
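
With dedicated summary bits in ISS, ravb_ptp_interrupt() no longer needs to report IRQ_NONE/IRQ_HANDLED itself: the caller only invokes it when ISS_CGIS is set, and the handler unconditionally acks GIS. Condensed caller side, as used in the main-driver hunks above (iss/result declarations elided):

u32 iss = ravb_read(ndev, ISS);

if (iss & ISS_CGIS) {
	ravb_ptp_interrupt(ndev);	/* caller must hold priv->lock */
	result = IRQ_HANDLED;
}
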
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index ceea74cc2..04cd39f66 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -482,7 +482,7 @@ static void sh_eth_chip_reset(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
/* reset device */
- sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
+ sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
mdelay(1);
}
@@ -537,11 +537,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- /* reset device */
- sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
- mdelay(1);
+ sh_eth_chip_reset(ndev);
sh_eth_select_mii(ndev);
}
@@ -725,8 +721,8 @@ static struct sh_eth_cpu_data sh7757_data = {
#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
- int i;
u32 mahr[2], malr[2];
+ int i;
/* save MAHR and MALR */
for (i = 0; i < 2; i++) {
@@ -734,9 +730,7 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
mahr[i] = ioread32((void *)GIGA_MAHR(i));
}
- /* reset device */
- iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
- mdelay(1);
+ sh_eth_chip_reset(ndev);
/* restore MAHR and MALR */
for (i = 0; i < 2; i++) {
@@ -899,7 +893,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
int cnt = 100;
while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
break;
mdelay(1);
cnt--;
@@ -1229,7 +1223,7 @@ ring_free:
return -ENOMEM;
}
-static int sh_eth_dev_init(struct net_device *ndev, bool start)
+static int sh_eth_dev_init(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret;
@@ -1279,10 +1273,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
RFLR);
sh_eth_modify(ndev, EESR, 0, 0);
- if (start) {
- mdp->irq_enabled = true;
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
- }
+ mdp->irq_enabled = true;
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* PAUSE Prohibition */
sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
@@ -1295,8 +1287,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
/* E-MAC Interrupt Enable register */
- if (start)
- sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+ sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
@@ -1309,10 +1300,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
- if (start) {
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
- }
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
return ret;
}
@@ -2194,7 +2183,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
__func__);
return ret;
}
- ret = sh_eth_dev_init(ndev, true);
+ ret = sh_eth_dev_init(ndev);
if (ret < 0) {
netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
__func__);
@@ -2246,7 +2235,7 @@ static int sh_eth_open(struct net_device *ndev)
goto out_free_irq;
/* device init */
- ret = sh_eth_dev_init(ndev, true);
+ ret = sh_eth_dev_init(ndev);
if (ret)
goto out_free_irq;
@@ -2299,7 +2288,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
}
/* device init */
- sh_eth_dev_init(ndev, true);
+ sh_eth_dev_init(ndev);
netif_start_queue(ndev);
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 8fa4ef3a7..c62380e34 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -394,7 +394,7 @@ enum RPADIR_BIT {
#define DEFAULT_FDR_INIT 0x00000707
/* ARSTR */
-enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
+enum ARSTR_BIT { ARSTR_ARST = 0x00000001, };
/* TSU_FWEN0 */
enum TSU_FWEN0_BIT {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index ca7336605..c2bd5378f 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -572,7 +572,7 @@ static inline int sgiseeq_reset(struct net_device *dev)
if (err)
return err;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
return 0;
@@ -648,7 +648,7 @@ static void timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
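
netif_trans_update() replaces the open-coded "dev->trans_start = jiffies" stores throughout these drivers. In this kernel the helper roughly does the following (paraphrased from include/linux/netdevice.h; it updates the timestamp of TX queue 0 and skips the store when it is already current):

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}
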
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0705ec869..097f363f1 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1726,14 +1726,33 @@ static int efx_probe_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
- efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
- sizeof(*efx->rps_flow_id),
- GFP_KERNEL);
- if (!efx->rps_flow_id) {
+ struct efx_channel *channel;
+ int i, success = 1;
+
+ efx_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ }
+
+ if (!success) {
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
efx->type->filter_table_remove(efx);
rc = -ENOMEM;
goto out_unlock;
}
+
+ efx->rps_expire_index = efx->rps_expire_channel = 0;
}
#endif
out_unlock:
@@ -1744,7 +1763,10 @@ out_unlock:
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
- kfree(efx->rps_flow_id);
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
#endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
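
The per-channel rps_flow_id allocation above is all-or-nothing: failures are only recorded during the first pass, and the cleanup loop can then run over every channel because kfree(NULL) is a no-op. The same pattern in condensed form (declarations elided; a sketch, not a drop-in replacement):

struct efx_channel *channel;
int success = 1;

efx_for_each_channel(channel, efx) {
	channel->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
				       sizeof(*channel->rps_flow_id),
				       GFP_KERNEL);
	if (!channel->rps_flow_id)
		success = 0;
}
if (!success) {
	efx_for_each_channel(channel, efx)
		kfree(channel->rps_flow_id);	/* kfree(NULL) is a no-op */
	return -ENOMEM;
}
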
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 133e9e35b..4c83739d1 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -104,7 +104,8 @@ int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs)
{
- unsigned address = 0, i, j;
+ unsigned address = 0;
+ int i, j;
efx_oword_t mask, imask, original, reg, buf;
for (i = 0; i < n_regs; ++i) {
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7f295c4d7..2a9228a6e 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
- result |= SUPPORTED_FIBRE;
- break;
-
case MC_CMD_MEDIA_QSFP_PLUS:
result |= SUPPORTED_FIBRE;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
result |= SUPPORTED_40000baseCR4_Full;
break;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 38c422321..d13ddf970 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@ struct efx_channel {
unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+ u32 *rps_flow_id;
#endif
unsigned n_rx_tobe_disc;
@@ -889,9 +893,9 @@ struct vfdi_status;
* @filter_sem: Filter table rw_semaphore, for freeing the table
* @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- * indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ * @rps_expire_channel's @rps_flow_id
* @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
@@ -1035,7 +1039,7 @@ struct efx_nic {
spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
- u32 *rps_flow_id;
+ unsigned int rps_expire_channel;
unsigned int rps_expire_index;
#endif
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8956995b2..02b0b5272 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_spec spec;
- const __be16 *ports;
- __be16 ether_type;
- int nhoff;
+ struct flow_keys fk;
int rc;
- /* The core RPS/RFS code has already parsed and validated
- * VLAN, IP and transport headers. We assume they are in the
- * header area.
- */
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- const struct vlan_hdr *vh =
- (const struct vlan_hdr *)skb->data;
+ if (flow_id == RPS_FLOW_ID_INVALID)
+ return -EINVAL;
- /* We can't filter on the IP 5-tuple and the vlan
- * together, so just strip the vlan header and filter
- * on the IP part.
- */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
- ether_type = vh->h_vlan_encapsulated_proto;
- nhoff = sizeof(struct vlan_hdr);
- } else {
- ether_type = skb->protocol;
- nhoff = 0;
- }
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
- if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+ if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+ if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
return -EPROTONOSUPPORT;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
- spec.ether_type = ether_type;
-
- if (ether_type == htons(ETH_P_IP)) {
- const struct iphdr *ip =
- (const struct iphdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- if (ip_is_fragment(ip))
- return -EPROTONOSUPPORT;
- spec.ip_proto = ip->protocol;
- spec.rem_host[0] = ip->saddr;
- spec.loc_host[0] = ip->daddr;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ spec.ether_type = fk.basic.n_proto;
+ spec.ip_proto = fk.basic.ip_proto;
+
+ if (fk.basic.n_proto == htons(ETH_P_IP)) {
+ spec.rem_host[0] = fk.addrs.v4addrs.src;
+ spec.loc_host[0] = fk.addrs.v4addrs.dst;
} else {
- const struct ipv6hdr *ip6 =
- (const struct ipv6hdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(*ip6) + 4);
- spec.ip_proto = ip6->nexthdr;
- memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
- memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
- ports = (const __be16 *)(ip6 + 1);
+ memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+ memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
}
- spec.rem_port = ports[0];
- spec.loc_port = ports[1];
+ spec.rem_port = fk.ports.src;
+ spec.loc_port = fk.ports.dst;
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
- efx->rps_flow_id[rc] = flow_id;
- channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ channel = efx_get_channel(efx, rxq_index);
+ channel->rps_flow_id[rc] = flow_id;
++channel->rfs_filters_added;
- if (ether_type == htons(ETH_P_IP))
+ if (spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
else
netif_info(efx, rx_status, efx->net_dev,
"steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
return rc;
}
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
- unsigned int index, size;
+ unsigned int channel_idx, index, size;
u32 flow_id;
if (!spin_trylock_bh(&efx->filter_lock))
return false;
expire_one = efx->type->filter_rfs_expire_one;
+ channel_idx = efx->rps_expire_channel;
index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters;
while (quota--) {
- flow_id = efx->rps_flow_id[index];
- if (expire_one(efx, flow_id, index))
+ struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+ flow_id = channel->rps_flow_id[index];
+
+ if (flow_id != RPS_FLOW_ID_INVALID &&
+ expire_one(efx, flow_id, index)) {
netif_info(efx, rx_status, efx->net_dev,
- "expired filter %d [flow %u]\n",
- index, flow_id);
- if (++index == size)
+ "expired filter %d [queue %u flow %u]\n",
+ index, channel_idx, flow_id);
+ channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ }
+ if (++index == size) {
+ if (++channel_idx == efx->n_channels)
+ channel_idx = 0;
index = 0;
+ }
}
+ efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock);
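
efx_filter_rfs() now delegates header parsing to the core flow dissector instead of walking skb->data past VLAN/IP headers by hand. A minimal sketch of that pattern, assuming <linux/skbuff.h> and <net/flow_dissector.h> (the function and debug message are illustrative):

static int example_get_4tuple(const struct sk_buff *skb)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
		return -EPROTONOSUPPORT;	/* fragments carry no ports */

	pr_debug("proto %u %u -> %u\n", fk.basic.ip_proto,
		 ntohs(fk.ports.src), ntohs(fk.ports.dst));
	return 0;
}
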
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 5eac523b4..aaa80f138 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -708,7 +708,7 @@ static int meth_tx(struct sk_buff *skb, struct net_device *dev)
mace->eth.dma_ctrl = priv->dma_ctrl;
meth_add_to_tx_ring(priv, skb);
- dev->trans_start = jiffies; /* save the timestamp */
+ netif_trans_update(dev); /* save the timestamp */
/* If TX ring is full, tell the upper layer to stop sending packets */
if (meth_tx_full(dev)) {
@@ -756,7 +756,7 @@ static void meth_tx_timeout(struct net_device *dev)
/* Enable interrupt */
spin_unlock_irqrestore(&priv->meth_lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index fd812d2e5..95001ee40 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1575,7 +1575,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(net_dev); /* prevent tx timeout */
/* load Transmit Descriptor Register */
sw32(txdp, sis_priv->tx_ring_dma);
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 443f1da9f..7186b8926 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -889,7 +889,7 @@ static void epic_tx_timeout(struct net_device *dev)
ew32(COMMAND, TxQueued);
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
if (!ep->tx_full)
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index a733868a4..cb49c9654 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -499,7 +499,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
/* DMA complete IRQ will free buffer and set jiffies */
#else
SMC_PUSH_DATA(lp, buf, len);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb_irq(skb);
#endif
if (!lp->tx_throttle) {
@@ -1189,7 +1189,7 @@ smc911x_tx_dma_irq(void *data)
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
BUG_ON(skb == NULL);
dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb_irq(skb);
lp->current_tx_skb = NULL;
if (lp->pending_tx_skb != NULL)
@@ -1283,7 +1283,7 @@ static void smc911x_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 664f59697..d496888b8 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -663,7 +663,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
lp->saved_skb = NULL;
dev_kfree_skb_any (skb);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* we can send another packet */
netif_wake_queue(dev);
@@ -1104,7 +1104,7 @@ static void smc_timeout(struct net_device *dev)
/* "kick" the adaptor */
smc_reset( dev->base_addr );
smc_enable( dev->base_addr );
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* clear anything saved */
((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index e841e1eb8..e0e001566 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1172,7 +1172,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
smc->saved_skb = NULL;
dev_kfree_skb_irq(skb);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_start_queue(dev);
}
@@ -1187,7 +1187,7 @@ static void smc_tx_timeout(struct net_device *dev)
inw(ioaddr)&0xff, inw(ioaddr + 2));
dev->stats.tx_errors++;
smc_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
smc->saved_skb = NULL;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index c5ed27c54..18ac52ded 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -619,7 +619,7 @@ static void smc_hardware_send_pkt(unsigned long data)
SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
smc_special_unlock(&lp->lock, flags);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
@@ -1364,7 +1364,7 @@ static void smc_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8af25563f..b5ab5e120 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -116,7 +116,6 @@ struct smsc911x_data {
struct phy_device *phy_dev;
struct mii_bus *mii_bus;
- int phy_irq[PHY_MAX_ADDR];
unsigned int using_extphy;
int last_duplex;
int last_carrier;
@@ -1073,7 +1072,6 @@ static int smsc911x_mii_init(struct platform_device *pdev,
pdata->mii_bus->priv = pdata;
pdata->mii_bus->read = smsc911x_mii_read;
pdata->mii_bus->write = smsc911x_mii_write;
- memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));
pdata->mii_bus->parent = &pdev->dev;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b3901616f..0fb362d5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,7 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
- mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
+ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
+ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o $(stmmac-y)
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index f96d25730..fc60368df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -41,6 +41,8 @@
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_4_00 0x40
+#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
#define DMA_TX_SIZE 512
#define DMA_RX_SIZE 512
@@ -167,6 +169,9 @@ struct stmmac_extra_stats {
unsigned long mtl_rx_fifo_ctrl_active;
unsigned long mac_rx_frame_ctrl_fifo;
unsigned long mac_gmii_rx_proto_engine;
+ /* TSO */
+ unsigned long tx_tso_frames;
+ unsigned long tx_tso_nfrags;
};
/* CSR Frequency Access Defines*/
@@ -243,6 +248,7 @@ enum rx_frame_status {
csum_none = 0x2,
llc_snap = 0x4,
dma_own = 0x8,
+ rx_not_ls = 0x10,
};
/* Tx status */
@@ -269,6 +275,7 @@ enum dma_irq_status {
#define CORE_PCS_ANE_COMPLETE (1 << 5)
#define CORE_PCS_LINK_STATUS (1 << 6)
#define CORE_RGMII_IRQ (1 << 7)
+#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
/* Physical Coding Sublayer */
struct rgmii_adv {
@@ -300,8 +307,10 @@ struct dma_features {
/* 802.3az - Energy-Efficient Ethernet (EEE) */
unsigned int eee;
unsigned int av;
+ unsigned int tsoen;
/* TX and RX csum */
unsigned int tx_coe;
+ unsigned int rx_coe;
unsigned int rx_coe_type1;
unsigned int rx_coe_type2;
unsigned int rxfifo_over_2048;
@@ -348,6 +357,10 @@ struct stmmac_desc_ops {
void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls);
+ void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
+ int len2, bool tx_own, bool ls,
+ unsigned int tcphdrlen,
+ unsigned int tcppayloadlen);
/* Set/get the owner of the descriptor */
void (*set_tx_owner) (struct dma_desc *p);
int (*get_tx_owner) (struct dma_desc *p);
@@ -380,6 +393,10 @@ struct stmmac_desc_ops {
u64(*get_timestamp) (void *desc, u32 ats);
/* get rx timestamp status */
int (*get_rx_timestamp_status) (void *desc, u32 ats);
+ /* Display ring */
+ void (*display_ring)(void *head, unsigned int size, bool rx);
+ /* set MSS via context descriptor */
+ void (*set_mss)(struct dma_desc *p, unsigned int mss);
};
extern const struct stmmac_desc_ops enh_desc_ops;
@@ -412,9 +429,15 @@ struct stmmac_dma_ops {
int (*dma_interrupt) (void __iomem *ioaddr,
struct stmmac_extra_stats *x);
/* If supported then get the optional core features */
- unsigned int (*get_hw_feature) (void __iomem *ioaddr);
+ void (*get_hw_feature)(void __iomem *ioaddr,
+ struct dma_features *dma_cap);
/* Program the HW RX Watchdog */
void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
+ void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
+ void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
+ void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
};
struct mac_device_info;
@@ -463,6 +486,7 @@ struct stmmac_hwtimestamp {
};
extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
int port;
@@ -495,7 +519,6 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
- unsigned int synopsys_uid;
void __iomem *pcsr; /* vpointer to device CSRs */
int multicast_filter_bins;
int unicast_filter_entries;
@@ -504,18 +527,47 @@ struct mac_device_info {
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries);
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
+ int perfect_uc_entries,
+ int *synopsys_id);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id);
+struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
+ int perfect_uc_entries, int *synopsys_id);
void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
-
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
+
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_mode_ops ring_mode_ops;
extern const struct stmmac_mode_ops chain_mode_ops;
-
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
+
+/**
+ * stmmac_get_synopsys_id - return the Synopsys ID.
+ * @hwid: raw value read from the HW version register
+ * Description: this simple function decodes and returns the Synopsys ID
+ * from the HW core version register.
+ */
+static inline u32 stmmac_get_synopsys_id(u32 hwid)
+{
+ /* Check Synopsys Id (not available on old chips) */
+ if (likely(hwid)) {
+ u32 uid = ((hwid & 0x0000ff00) >> 8);
+ u32 synid = (hwid & 0x000000ff);
+
+ pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ uid, synid);
+
+ return synid;
+ }
+ return 0;
+}
#endif /* __COMMON_H__ */
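
Usage sketch for the new helper, condensed from the setup paths above (ioaddr and the >= comparison against DWMAC_CORE_4_00 are how the stmmac core distinguishes register layouts; treat this as illustrative):

u32 hwid = readl(ioaddr + GMAC_VERSION);
int synopsys_id = stmmac_get_synopsys_id(hwid);

if (synopsys_id >= DWMAC_CORE_4_00)
	/* GMAC4 register layout: dwmac4_setup() path */;
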
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index afb90d129..f13499fa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -49,6 +49,7 @@ struct socfpga_dwmac {
u32 reg_shift;
struct device *dev;
struct regmap *sys_mgr_base_addr;
+ struct reset_control *stmmac_rst;
void __iomem *splitter_base;
bool f2h_ptp_ref_clk;
};
@@ -135,7 +136,7 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
return 0;
}
-static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
+static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
{
struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
int phymode = dwmac->interface;
@@ -164,6 +165,10 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
if (dwmac->splitter_base)
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+ /* Assert reset to the enet controller before changing the phy mode */
+ if (dwmac->stmmac_rst)
+ reset_control_assert(dwmac->stmmac_rst);
+
regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
@@ -181,57 +186,13 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
- return 0;
-}
-
-static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
-{
- struct socfpga_dwmac *dwmac = priv;
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *stpriv = NULL;
- int ret = 0;
-
- if (!ndev)
- return -EINVAL;
-
- stpriv = netdev_priv(ndev);
- if (!stpriv)
- return -EINVAL;
-
- /* Assert reset to the enet controller before changing the phy mode */
- if (stpriv->stmmac_rst)
- reset_control_assert(stpriv->stmmac_rst);
-
- /* Setup the phy mode in the system manager registers according to
- * devicetree configuration
- */
- ret = socfpga_dwmac_setup(dwmac);
-
/* Deassert reset for the phy configuration to be sampled by
* the enet controller, and operation to start in requested mode
*/
- if (stpriv->stmmac_rst)
- reset_control_deassert(stpriv->stmmac_rst);
-
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (stpriv->phydev)
- phy_resume(stpriv->phydev);
+ if (dwmac->stmmac_rst)
+ reset_control_deassert(dwmac->stmmac_rst);
- return ret;
+ return 0;
}
static int socfpga_dwmac_probe(struct platform_device *pdev)
@@ -260,23 +221,59 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = socfpga_dwmac_setup(dwmac);
- if (ret) {
- dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
- return ret;
- }
-
plat_dat->bsp_priv = dwmac;
- plat_dat->init = socfpga_dwmac_init;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (!ret)
- ret = socfpga_dwmac_init(pdev, dwmac);
+ if (!ret) {
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *stpriv = netdev_priv(ndev);
+
+ /* The socfpga driver needs to control the stmmac reset to
+ * set the phy mode. Create a copy of the core reset handle
+ * so it can be used by the driver later.
+ */
+ dwmac->stmmac_rst = stpriv->stmmac_rst;
+
+ ret = socfpga_dwmac_set_phy_mode(dwmac);
+ }
return ret;
}
+#ifdef CONFIG_PM_SLEEP
+static int socfpga_dwmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ socfpga_dwmac_set_phy_mode(priv->plat->bsp_priv);
+
+ /* Before the enet controller is suspended, the phy is suspended.
+ * This causes the phy clock to be gated. The enet controller is
+ * resumed before the phy, so the clock is still gated "off" when
+ * the enet controller is resumed. This code makes sure the phy
+ * is "resumed" before reinitializing the enet controller since
+ * the enet controller depends on an active phy clock to complete
+ * a DMA reset. A DMA reset will "time out" if executed
+ * with no phy clock input on the Synopsys enet controller.
+ * Verified through Synopsys Case #8000711656.
+ *
+ * Note that the phy clock is also gated when the phy is isolated.
+ * Phy "suspend" and "isolate" controls are located in phy basic
+ * control register 0, and can be modified by the phy driver
+ * framework.
+ */
+ if (priv->phydev)
+ phy_resume(priv->phydev);
+
+ return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
+ socfpga_dwmac_resume);
+
static const struct of_device_id socfpga_dwmac_match[] = {
{ .compatible = "altr,socfpga-stmmac" },
{ }
@@ -288,7 +285,7 @@ static struct platform_driver socfpga_dwmac_driver = {
.remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
- .pm = &stmmac_pltfr_pm_ops,
+ .pm = &socfpga_dwmac_pm_ops,
.of_match_table = socfpga_dwmac_match,
},
};
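
SIMPLE_DEV_PM_OPS() pairs the generic stmmac_suspend() with the glue-specific resume above. Under CONFIG_PM_SLEEP it expands to roughly the following (paraphrased; the hibernation hooks default to the same pair):

static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
	.suspend = stmmac_suspend,
	.resume  = socfpga_dwmac_resume,
	/* .freeze/.thaw/.poweroff/.restore default to the same callbacks */
};
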
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index c2941172f..fb1eb578e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -491,7 +491,8 @@ static const struct stmmac_ops dwmac1000_ops = {
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries)
+ int perfect_uc_entries,
+ int *synopsys_id)
{
struct mac_device_info *mac;
u32 hwid = readl(ioaddr + GMAC_VERSION);
@@ -516,7 +517,9 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
mac->link.speed = GMAC_CONTROL_FES;
mac->mii.addr = GMAC_MII_ADDR;
mac->mii.data = GMAC_MII_DATA;
- mac->synopsys_uid = hwid;
+
+ /* Get and dump the chip ID */
+ *synopsys_id = stmmac_get_synopsys_id(hwid);
return mac;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index da32d6037..990746955 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -215,9 +215,40 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
}
}
-static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr)
+static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
- return readl(ioaddr + DMA_HW_FEATURE);
+ u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
+
+ dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
+ dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
+ dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
+ dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
+ dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
+ dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
+ dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
+ dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
+ dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
+ /* MMC */
+ dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
+ /* IEEE 1588-2002 */
+ dma_cap->time_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
+ /* IEEE 1588-2008 */
+ dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
+ dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
+ /* TX and RX csum */
+ dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
+ dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
+ dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
+ dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
+ /* TX and RX number of channels */
+ dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
+ dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
+ /* Alternate (enhanced) DESC mode */
+ dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
}
static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
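
get_hw_feature() now fills a decoded struct dma_features instead of returning the raw DMA_HW_FEATURE word, so callers stop carrying register-layout knowledge. Caller side, condensed from the stmmac core (a sketch; priv->hw->dma is the dma-ops table wired up at setup time):

struct dma_features dma_cap = { 0 };

priv->hw->dma->get_hw_feature(priv->ioaddr, &dma_cap);
if (dma_cap.tx_coe)
	/* TX checksum offload is available on this core */;
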
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index f8dd773f2..6418b2e07 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -173,7 +173,7 @@ static const struct stmmac_ops dwmac100_ops = {
.get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
{
struct mac_device_info *mac;
@@ -192,7 +192,8 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
mac->link.speed = 0;
mac->mii.addr = MAC_MII_ADDR;
mac->mii.data = MAC_MII_DATA;
- mac->synopsys_uid = 0;
+ /* Synopsys Id is not available on old chips */
+ *synopsys_id = 0;
return mac;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
new file mode 100644
index 000000000..bc50952a1
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -0,0 +1,255 @@
+/*
+ * DWMAC4 Header file.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_H__
+#define __DWMAC4_H__
+
+#include "common.h"
+
+/* MAC registers */
+#define GMAC_CONFIG 0x00000000
+#define GMAC_PACKET_FILTER 0x00000008
+#define GMAC_HASH_TAB_0_31 0x00000010
+#define GMAC_HASH_TAB_32_63 0x00000014
+#define GMAC_RX_FLOW_CTRL 0x00000090
+#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
+#define GMAC_INT_STATUS 0x000000b0
+#define GMAC_INT_EN 0x000000b4
+#define GMAC_AN_CTRL 0x000000e0
+#define GMAC_AN_STATUS 0x000000e4
+#define GMAC_AN_ADV 0x000000e8
+#define GMAC_AN_LPA 0x000000ec
+#define GMAC_PMT 0x000000c0
+#define GMAC_VERSION 0x00000110
+#define GMAC_DEBUG 0x00000114
+#define GMAC_HW_FEATURE0 0x0000011c
+#define GMAC_HW_FEATURE1 0x00000120
+#define GMAC_HW_FEATURE2 0x00000124
+#define GMAC_MDIO_ADDR 0x00000200
+#define GMAC_MDIO_DATA 0x00000204
+#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
+#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
+
+/* MAC Packet Filtering */
+#define GMAC_PACKET_FILTER_PR BIT(0)
+#define GMAC_PACKET_FILTER_HMC BIT(2)
+#define GMAC_PACKET_FILTER_PM BIT(4)
+
+#define GMAC_MAX_PERFECT_ADDRESSES 128
+
+/* MAC Flow Control RX */
+#define GMAC_RX_FLOW_CTRL_RFE BIT(0)
+
+/* MAC Flow Control TX */
+#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
+#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
+
+/* MAC Interrupt bitmap*/
+#define GMAC_INT_PMT_EN BIT(4)
+#define GMAC_INT_LPI_EN BIT(5)
+
+enum dwmac4_irq_status {
+ time_stamp_irq = 0x00001000,
+ mmc_rx_csum_offload_irq = 0x00000800,
+ mmc_tx_irq = 0x00000400,
+ mmc_rx_irq = 0x00000200,
+ mmc_irq = 0x00000100,
+ pmt_irq = 0x00000010,
+ pcs_ane_irq = 0x00000004,
+ pcs_link_irq = 0x00000002,
+};
+
+/* MAC Auto-Neg bitmap*/
+#define GMAC_AN_CTRL_RAN BIT(9)
+#define GMAC_AN_CTRL_ANE BIT(12)
+#define GMAC_AN_CTRL_ELE BIT(14)
+#define GMAC_AN_FD BIT(5)
+#define GMAC_AN_HD BIT(6)
+#define GMAC_AN_PSE_MASK GENMASK(8, 7)
+#define GMAC_AN_PSE_SHIFT 7
+
+/* MAC PMT bitmap */
+enum power_event {
+ pointer_reset = 0x80000000,
+ global_unicast = 0x00000200,
+ wake_up_rx_frame = 0x00000040,
+ magic_frame = 0x00000020,
+ wake_up_frame_en = 0x00000004,
+ magic_pkt_en = 0x00000002,
+ power_down = 0x00000001,
+};
+
+/* MAC Debug bitmap */
+#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
+#define GMAC_DEBUG_TFCSTS_SHIFT 17
+#define GMAC_DEBUG_TFCSTS_IDLE 0
+#define GMAC_DEBUG_TFCSTS_WAIT 1
+#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
+#define GMAC_DEBUG_TFCSTS_XFER 3
+#define GMAC_DEBUG_TPESTS BIT(16)
+#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
+#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
+#define GMAC_DEBUG_RPESTS BIT(0)
+
+/* MAC config */
+#define GMAC_CONFIG_IPC BIT(27)
+#define GMAC_CONFIG_2K BIT(22)
+#define GMAC_CONFIG_ACS BIT(20)
+#define GMAC_CONFIG_BE BIT(18)
+#define GMAC_CONFIG_JD BIT(17)
+#define GMAC_CONFIG_JE BIT(16)
+#define GMAC_CONFIG_PS BIT(15)
+#define GMAC_CONFIG_FES BIT(14)
+#define GMAC_CONFIG_DM BIT(13)
+#define GMAC_CONFIG_DCRS BIT(9)
+#define GMAC_CONFIG_TE BIT(1)
+#define GMAC_CONFIG_RE BIT(0)
+
+/* MAC HW features0 bitmap */
+#define GMAC_HW_FEAT_ADDMAC BIT(18)
+#define GMAC_HW_FEAT_RXCOESEL BIT(16)
+#define GMAC_HW_FEAT_TXCOSEL BIT(14)
+#define GMAC_HW_FEAT_EEESEL BIT(13)
+#define GMAC_HW_FEAT_TSSEL BIT(12)
+#define GMAC_HW_FEAT_MMCSEL BIT(8)
+#define GMAC_HW_FEAT_MGKSEL BIT(7)
+#define GMAC_HW_FEAT_RWKSEL BIT(6)
+#define GMAC_HW_FEAT_SMASEL BIT(5)
+#define GMAC_HW_FEAT_VLHASH BIT(4)
+#define GMAC_HW_FEAT_PCSSEL BIT(3)
+#define GMAC_HW_FEAT_HDSEL BIT(2)
+#define GMAC_HW_FEAT_GMIISEL BIT(1)
+#define GMAC_HW_FEAT_MIISEL BIT(0)
+
+/* MAC HW features1 bitmap */
+#define GMAC_HW_FEAT_AVSEL BIT(20)
+#define GMAC_HW_TSOEN BIT(18)
+
+/* MAC HW features2 bitmap */
+#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
+#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12)
+
+/* MAC HW ADDR regs */
+#define GMAC_HI_DCS GENMASK(18, 16)
+#define GMAC_HI_DCS_SHIFT 16
+#define GMAC_HI_REG_AE BIT(31)
+
+/* MTL registers */
+#define MTL_INT_STATUS 0x00000c20
+#define MTL_INT_Q0 BIT(0)
+
+#define MTL_CHAN_BASE_ADDR 0x00000d00
+#define MTL_CHAN_BASE_OFFSET 0x40
+#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \
+ (x * MTL_CHAN_BASE_OFFSET))
+
+#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x)
+#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8)
+#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c)
+#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30)
+#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
+
+#define MTL_OP_MODE_RSF BIT(5)
+#define MTL_OP_MODE_TSF BIT(1)
+
+#define MTL_OP_MODE_TTC_MASK 0x70
+#define MTL_OP_MODE_TTC_SHIFT 4
+
+#define MTL_OP_MODE_TTC_32 0
+#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
+
+#define MTL_OP_MODE_RTC_MASK 0x18
+#define MTL_OP_MODE_RTC_SHIFT 3
+
+#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_64 0
+#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
+
+/* MTL debug */
+#define MTL_DEBUG_TXSTSFSTS BIT(5)
+#define MTL_DEBUG_TXFSTS BIT(4)
+#define MTL_DEBUG_TWCSTS BIT(3)
+
+/* MTL debug: Tx FIFO Read Controller Status */
+#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_TRCSTS_SHIFT 1
+#define MTL_DEBUG_TRCSTS_IDLE 0
+#define MTL_DEBUG_TRCSTS_READ 1
+#define MTL_DEBUG_TRCSTS_TXW 2
+#define MTL_DEBUG_TRCSTS_WRITE 3
+#define MTL_DEBUG_TXPAUSED BIT(0)
+
+/* MAC debug: GMII or MII Transmit Protocol Engine Status */
+#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
+#define MTL_DEBUG_RXFSTS_SHIFT 4
+#define MTL_DEBUG_RXFSTS_EMPTY 0
+#define MTL_DEBUG_RXFSTS_BT 1
+#define MTL_DEBUG_RXFSTS_AT 2
+#define MTL_DEBUG_RXFSTS_FULL 3
+#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_RRCSTS_SHIFT 1
+#define MTL_DEBUG_RRCSTS_IDLE 0
+#define MTL_DEBUG_RRCSTS_RDATA 1
+#define MTL_DEBUG_RRCSTS_RSTAT 2
+#define MTL_DEBUG_RRCSTS_FLUSH 3
+#define MTL_DEBUG_RWCSTS BIT(0)
+
+/* MTL interrupt */
+#define MTL_RX_OVERFLOW_INT_EN BIT(24)
+#define MTL_RX_OVERFLOW_INT BIT(16)
+
+/* Default operating mode of the MAC */
+#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
+ GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
+
+/* To dump the core regs excluding the Address Registers */
+#define GMAC_REG_NUM 132
+
+/* MTL debug */
+#define MTL_DEBUG_TXSTSFSTS BIT(5)
+#define MTL_DEBUG_TXFSTS BIT(4)
+#define MTL_DEBUG_TWCSTS BIT(3)
+
+/* MTL debug: Tx FIFO Read Controller Status */
+#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_TRCSTS_SHIFT 1
+#define MTL_DEBUG_TRCSTS_IDLE 0
+#define MTL_DEBUG_TRCSTS_READ 1
+#define MTL_DEBUG_TRCSTS_TXW 2
+#define MTL_DEBUG_TRCSTS_WRITE 3
+#define MTL_DEBUG_TXPAUSED BIT(0)
+
+/* MAC debug: GMII or MII Transmit Protocol Engine Status */
+#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
+#define MTL_DEBUG_RXFSTS_SHIFT 4
+#define MTL_DEBUG_RXFSTS_EMPTY 0
+#define MTL_DEBUG_RXFSTS_BT 1
+#define MTL_DEBUG_RXFSTS_AT 2
+#define MTL_DEBUG_RXFSTS_FULL 3
+#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_RRCSTS_SHIFT 1
+#define MTL_DEBUG_RRCSTS_IDLE 0
+#define MTL_DEBUG_RRCSTS_RDATA 1
+#define MTL_DEBUG_RRCSTS_RSTAT 2
+#define MTL_DEBUG_RRCSTS_FLUSH 3
+#define MTL_DEBUG_RWCSTS BIT(0)
+
+extern const struct stmmac_dma_ops dwmac4_dma_ops;
+extern const struct stmmac_dma_ops dwmac410_dma_ops;
+#endif /* __DWMAC4_H__ */
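
The MTL channel macros encode a 0x40-byte register window per channel; for example, channel 1's RX operation-mode register resolves to 0xd00 + 1 * 0x40 + 0x30 = 0xd70. An illustrative read-modify-write using those macros (ioaddr is the usual mapped register base):

u32 rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(1));

rx_op |= MTL_OP_MODE_RSF;		/* RX store-and-forward */
writel(rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(1));
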
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
new file mode 100644
index 000000000..44da877d2
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -0,0 +1,407 @@
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.00 has been used for developing this code.
+ *
+ * This only implements the mac core functions for this chip.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include "dwmac4.h"
+
+static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ value |= GMAC_CORE_INIT;
+
+ if (mtu > 1500)
+ value |= GMAC_CONFIG_2K;
+ if (mtu > 2000)
+ value |= GMAC_CONFIG_JE;
+
+ writel(value, ioaddr + GMAC_CONFIG);
+
+ /* Mask GMAC interrupts */
+ writel(GMAC_INT_PMT_EN, ioaddr + GMAC_INT_EN);
+}
+
+static void dwmac4_dump_regs(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int i;
+
+ pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr);
+
+ for (i = 0; i < GMAC_REG_NUM; i++) {
+ int offset = i * 4;
+
+ pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+}
+
+static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ if (hw->rx_csum)
+ value |= GMAC_CONFIG_IPC;
+ else
+ value &= ~GMAC_CONFIG_IPC;
+
+ writel(value, ioaddr + GMAC_CONFIG);
+
+ value = readl(ioaddr + GMAC_CONFIG);
+
+ return !!(value & GMAC_CONFIG_IPC);
+}
+
+static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ unsigned int pmt = 0;
+
+ if (mode & WAKE_MAGIC) {
+ pr_debug("GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ }
+ if (mode & WAKE_UCAST) {
+ pr_debug("GMAC: WOL on global unicast\n");
+ pmt |= global_unicast;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+}
+
+static void dwmac4_set_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac4_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac4_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ unsigned int value = 0;
+
+ if (dev->flags & IFF_PROMISC) {
+ value = GMAC_PACKET_FILTER_PR;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
+ /* Pass all multi */
+ value = GMAC_PACKET_FILTER_PM;
+		/* Set the 64 bits of the HASH tab. To be updated if a
+		 * larger hash table is used
+		 */
+ writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
+ writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
+ } else if (!netdev_mc_empty(dev)) {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Hash filter for multicast */
+ value = GMAC_PACKET_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the content of the Hash Table Reg 0 and 1.
+ */
+ int bit_nr =
+ (bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
+			/* The most significant bit determines the register
+			 * to use while the other 5 bits determine the bit
+			 * within the selected register
+			 */
+ mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
+ }
+ writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
+ }
+
+ /* Handle multiple unicast addresses */
+ if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+ /* Switch to promiscuous mode if more than 128 addrs
+ * are required
+ */
+ value |= GMAC_PACKET_FILTER_PR;
+ } else if (!netdev_uc_empty(dev)) {
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwmac4_set_umac_addr(hw, ha->addr, reg);
+ reg++;
+ }
+ }
+
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+}
+
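The hash placement above can be reproduced outside the kernel. A minimal
user-space sketch, assuming the kernel's crc32_le() is the standard reflected
CRC-32 (polynomial 0xEDB88320, seed passed in, no final inversion; the driver
applies the ~ itself); the sample multicast address is arbitrary:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 over len bytes, starting from crc (modeled on the
 * kernel's crc32_le()). */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320u : 0);
	}
	return crc;
}

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r = (r << 1) | ((x >> i) & 1);
	return r;
}

int main(void)
{
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t bit_nr = bitrev32(~crc32_le(~0u, addr, 6)) >> 26;

	/* bit 5 picks GMAC_HASH_TAB_32_63 vs _0_31, bits 4:0 the bit */
	printf("hash reg %u, bit %u\n", bit_nr >> 5, bit_nr & 0x1fu);
	return 0;
}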
+static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 channel = STMMAC_CHAN0; /* FIXME */
+ unsigned int flow = 0;
+
+ pr_debug("GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ pr_debug("\tReceive Flow-Control ON\n");
+ flow |= GMAC_RX_FLOW_CTRL_RFE;
+ writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+ }
+ if (fc & FLOW_TX) {
+ pr_debug("\tTransmit Flow-Control ON\n");
+ flow |= GMAC_TX_FLOW_CTRL_TFE;
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+
+ if (duplex) {
+ pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
+ flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+ }
+ }
+}
+
+static void dwmac4_ctrl_ane(struct mac_device_info *hw, bool restart)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ /* auto negotiation enable and External Loopback enable */
+ u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+
+ if (restart)
+ value |= GMAC_AN_CTRL_RAN;
+
+ writel(value, ioaddr + GMAC_AN_CTRL);
+}
+
+static void dwmac4_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_AN_ADV);
+
+ if (value & GMAC_AN_FD)
+ adv->duplex = DUPLEX_FULL;
+ if (value & GMAC_AN_HD)
+ adv->duplex |= DUPLEX_HALF;
+
+ adv->pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+
+ value = readl(ioaddr + GMAC_AN_LPA);
+
+ if (value & GMAC_AN_FD)
+ adv->lp_duplex = DUPLEX_FULL;
+ if (value & GMAC_AN_HD)
+ adv->lp_duplex = DUPLEX_HALF;
+
+ adv->lp_pause = (value & GMAC_AN_PSE_MASK) >> GMAC_AN_PSE_SHIFT;
+}
+
+static int dwmac4_irq_status(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 mtl_int_qx_status;
+ u32 intr_status;
+ int ret = 0;
+
+ intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+	/* Events that are not used (e.g. MMC interrupts) are only counted. */
+	if (intr_status & mmc_tx_irq)
+ x->mmc_tx_irq_n++;
+ if (unlikely(intr_status & mmc_rx_irq))
+ x->mmc_rx_irq_n++;
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ x->mmc_rx_csum_offload_irq_n++;
+ /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
+ if (unlikely(intr_status & pmt_irq)) {
+ readl(ioaddr + GMAC_PMT);
+ x->irq_receive_pmt_irq_n++;
+ }
+
+ if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
+ readl(ioaddr + GMAC_AN_STATUS);
+ x->irq_pcs_ane_n++;
+ }
+
+ mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+ /* Check MTL Interrupt: Currently only one queue is used: Q0. */
+ if (mtl_int_qx_status & MTL_INT_Q0) {
+ /* read Queue 0 Interrupt status */
+ u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+
+ if (status & MTL_RX_OVERFLOW_INT) {
+ /* clear Interrupt */
+ writel(status | MTL_RX_OVERFLOW_INT,
+ ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+ ret = CORE_IRQ_MTL_RX_OVERFLOW;
+ }
+ }
+
+ return ret;
+}
+
+static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+ u32 value;
+
+ /* Currently only channel 0 is supported */
+ value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
+
+ if (value & MTL_DEBUG_TXSTSFSTS)
+ x->mtl_tx_status_fifo_full++;
+ if (value & MTL_DEBUG_TXFSTS)
+ x->mtl_tx_fifo_not_empty++;
+ if (value & MTL_DEBUG_TWCSTS)
+ x->mmtl_fifo_ctrl++;
+ if (value & MTL_DEBUG_TRCSTS_MASK) {
+ u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+ >> MTL_DEBUG_TRCSTS_SHIFT;
+ if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+ x->mtl_tx_fifo_read_ctrl_write++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+ x->mtl_tx_fifo_read_ctrl_wait++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+ x->mtl_tx_fifo_read_ctrl_read++;
+ else
+ x->mtl_tx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_TXPAUSED)
+ x->mac_tx_in_pause++;
+
+ value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
+
+ if (value & MTL_DEBUG_RXFSTS_MASK) {
+ u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+			     >> MTL_DEBUG_RXFSTS_SHIFT;
+
+ if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+ x->mtl_rx_fifo_fill_level_full++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+ x->mtl_rx_fifo_fill_above_thresh++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+ x->mtl_rx_fifo_fill_below_thresh++;
+ else
+ x->mtl_rx_fifo_fill_level_empty++;
+ }
+ if (value & MTL_DEBUG_RRCSTS_MASK) {
+ u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+ MTL_DEBUG_RRCSTS_SHIFT;
+
+ if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+ x->mtl_rx_fifo_read_ctrl_flush++;
+		else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+			x->mtl_rx_fifo_read_ctrl_status++;
+		else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+			x->mtl_rx_fifo_read_ctrl_read_data++;
+ else
+ x->mtl_rx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_RWCSTS)
+ x->mtl_rx_fifo_ctrl_active++;
+
+ /* GMAC debug */
+ value = readl(ioaddr + GMAC_DEBUG);
+
+ if (value & GMAC_DEBUG_TFCSTS_MASK) {
+ u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
+ >> GMAC_DEBUG_TFCSTS_SHIFT;
+
+ if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
+ x->mac_tx_frame_ctrl_xfer++;
+ else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
+ x->mac_tx_frame_ctrl_pause++;
+ else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
+ x->mac_tx_frame_ctrl_wait++;
+ else
+ x->mac_tx_frame_ctrl_idle++;
+ }
+ if (value & GMAC_DEBUG_TPESTS)
+ x->mac_gmii_tx_proto_engine++;
+ if (value & GMAC_DEBUG_RFCFCSTS_MASK)
+ x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
+ >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+ if (value & GMAC_DEBUG_RPESTS)
+ x->mac_gmii_rx_proto_engine++;
+}
+
+static const struct stmmac_ops dwmac4_ops = {
+ .core_init = dwmac4_core_init,
+ .rx_ipc = dwmac4_rx_ipc_enable,
+ .dump_regs = dwmac4_dump_regs,
+ .host_irq_status = dwmac4_irq_status,
+ .flow_ctrl = dwmac4_flow_ctrl,
+ .pmt = dwmac4_pmt,
+ .set_umac_addr = dwmac4_set_umac_addr,
+ .get_umac_addr = dwmac4_get_umac_addr,
+ .ctrl_ane = dwmac4_ctrl_ane,
+ .get_adv = dwmac4_get_adv,
+ .debug = dwmac4_debug,
+ .set_filter = dwmac4_set_filter,
+};
+
+struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
+ int perfect_uc_entries, int *synopsys_id)
+{
+ struct mac_device_info *mac;
+ u32 hwid = readl(ioaddr + GMAC_VERSION);
+
+	mac = kzalloc(sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ mac->pcsr = ioaddr;
+ mac->multicast_filter_bins = mcbins;
+ mac->unicast_filter_entries = perfect_uc_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->mac = &dwmac4_ops;
+
+ mac->link.port = GMAC_CONFIG_PS;
+ mac->link.duplex = GMAC_CONFIG_DM;
+ mac->link.speed = GMAC_CONFIG_FES;
+ mac->mii.addr = GMAC_MDIO_ADDR;
+ mac->mii.data = GMAC_MDIO_DATA;
+
+ /* Get and dump the chip ID */
+ *synopsys_id = stmmac_get_synopsys_id(hwid);
+
+ if (*synopsys_id > DWMAC_CORE_4_00)
+ mac->dma = &dwmac410_dma_ops;
+ else
+ mac->dma = &dwmac4_dma_ops;
+
+ return mac;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
new file mode 100644
index 000000000..4ec7397e7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -0,0 +1,389 @@
+/*
+ * This contains the functions to handle the descriptors for DesignWare databook
+ * 4.xx.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "dwmac4_descs.h"
+
+static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p,
+ void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int tdes3;
+ int ret = tx_done;
+
+ tdes3 = p->des3;
+
+ /* Get tx owner first */
+ if (unlikely(tdes3 & TDES3_OWN))
+ return tx_dma_own;
+
+ /* Verify tx error by looking at the last segment. */
+ if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
+ return tx_not_ls;
+
+ if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
+ if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
+ x->tx_jabber++;
+ if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
+ x->tx_frame_flushed++;
+ if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
+ (tdes3 & TDES3_EXCESSIVE_COLLISION)))
+ stats->collisions +=
+ (tdes3 & TDES3_COLLISION_COUNT_MASK)
+ >> TDES3_COLLISION_COUNT_SHIFT;
+
+ if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
+ x->tx_deferred++;
+
+ if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
+ x->tx_underflow++;
+
+ if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
+ x->tx_ip_header_error++;
+
+ if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
+ x->tx_payload_error++;
+
+ ret = tx_err;
+ }
+
+ if (unlikely(tdes3 & TDES3_DEFERRED))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int rdes1 = p->des1;
+ unsigned int rdes2 = p->des2;
+ unsigned int rdes3 = p->des3;
+ int message_type;
+ int ret = good_frame;
+
+ if (unlikely(rdes3 & RDES3_OWN))
+ return dma_own;
+
+ /* Verify rx error by looking at the last segment. */
+ if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+ return discard_frame;
+
+ if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
+ if (unlikely(rdes3 & RDES3_GIANT_PACKET))
+ stats->rx_length_errors++;
+ if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
+ x->rx_gmac_overflow++;
+
+ if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
+ x->rx_watchdog++;
+
+ if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
+ x->rx_mii++;
+
+ if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+
+ if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
+ x->dribbling_bit++;
+
+ ret = discard_frame;
+ }
+
+ message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
+
+ if (rdes1 & RDES1_IP_HDR_ERROR)
+ x->ip_hdr_err++;
+ if (rdes1 & RDES1_IP_CSUM_BYPASSED)
+ x->ip_csum_bypassed++;
+ if (rdes1 & RDES1_IPV4_HEADER)
+ x->ipv4_pkt_rcvd++;
+ if (rdes1 & RDES1_IPV6_HEADER)
+ x->ipv6_pkt_rcvd++;
+ if (message_type == RDES_EXT_SYNC)
+ x->rx_msg_type_sync++;
+ else if (message_type == RDES_EXT_FOLLOW_UP)
+ x->rx_msg_type_follow_up++;
+ else if (message_type == RDES_EXT_DELAY_REQ)
+ x->rx_msg_type_delay_req++;
+ else if (message_type == RDES_EXT_DELAY_RESP)
+ x->rx_msg_type_delay_resp++;
+ else if (message_type == RDES_EXT_PDELAY_REQ)
+ x->rx_msg_type_pdelay_req++;
+ else if (message_type == RDES_EXT_PDELAY_RESP)
+ x->rx_msg_type_pdelay_resp++;
+ else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
+ x->rx_msg_type_pdelay_follow_up++;
+ else
+ x->rx_msg_type_ext_no_ptp++;
+
+ if (rdes1 & RDES1_PTP_PACKET_TYPE)
+ x->ptp_frame_type++;
+ if (rdes1 & RDES1_PTP_VER)
+ x->ptp_ver++;
+ if (rdes1 & RDES1_TIMESTAMP_DROPPED)
+ x->timestamp_dropped++;
+
+ if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+
+ if (rdes2 & RDES2_L3_FILTER_MATCH)
+ x->l3_filter_match++;
+ if (rdes2 & RDES2_L4_FILTER_MATCH)
+ x->l4_filter_match++;
+ if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
+ >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
+ x->l3_l4_filter_no_match++;
+
+ return ret;
+}
+
+static int dwmac4_rd_get_tx_len(struct dma_desc *p)
+{
+ return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
+}
+
+static int dwmac4_get_tx_owner(struct dma_desc *p)
+{
+ return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
+}
+
+static void dwmac4_set_tx_owner(struct dma_desc *p)
+{
+ p->des3 |= TDES3_OWN;
+}
+
+static void dwmac4_set_rx_owner(struct dma_desc *p)
+{
+ p->des3 |= RDES3_OWN;
+}
+
+static int dwmac4_get_tx_ls(struct dma_desc *p)
+{
+ return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
+}
+
+static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
+{
+ return (p->des3 & RDES3_PACKET_SIZE_MASK);
+}
+
+static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des2 |= TDES2_TIMESTAMP_ENABLE;
+}
+
+static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return (p->des3 & TDES3_TIMESTAMP_STATUS)
+ >> TDES3_TIMESTAMP_STATUS_SHIFT;
+}
+
+/* NOTE: for RX, the context (CTX) bit has to be checked before reading the
+ * timestamp; a specific function for TX and another one for RX would be
+ * cleaner.
+ */
+static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns;
+
+ ns = p->des0;
+ /* convert high/sec time stamp value to nanosecond */
+ ns += p->des1 * 1000000000ULL;
+
+ return ns;
+}
+
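The write-back timestamp layout used by dwmac4_wrback_get_timestamp() above
(des0 carries nanoseconds, des1 carries seconds) can be sanity-checked with a
small host-side sketch; the snapshot values are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical write-back snapshot: des0 = ns part, des1 = s part */
	uint32_t des0 = 500000000u;	/* 0.5 s expressed in ns */
	uint32_t des1 = 3u;		/* 3 s */
	uint64_t ns = des0 + des1 * 1000000000ULL;

	printf("timestamp = %llu ns\n", (unsigned long long)ns);
	return 0;
}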
+static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+
+ return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
+ >> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+}
+
+static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end)
+{
+ p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
+
+ if (!disable_rx_ic)
+ p->des3 |= RDES3_INT_ON_COMPLETION_EN;
+}
+
+static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own,
+ bool ls)
+{
+ unsigned int tdes3 = p->des3;
+
+ p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
+
+ if (is_fs)
+ tdes3 |= TDES3_FIRST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+
+ if (likely(csum_flag))
+ tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+ else
+ tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+
+ if (ls)
+ tdes3 |= TDES3_LAST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= TDES3_OWN;
+
+	if (is_fs && tx_own)
+		/* When the own bit has to be set for the first frame, all
+		 * descriptors for the same frame have to be written before,
+		 * to avoid a race condition.
+		 */
+		wmb();
+
+ p->des3 = tdes3;
+}
+
+static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+ int len1, int len2, bool tx_own,
+ bool ls, unsigned int tcphdrlen,
+ unsigned int tcppayloadlen)
+{
+ unsigned int tdes3 = p->des3;
+
+ if (len1)
+ p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
+
+ if (len2)
+ p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
+ & TDES2_BUFFER2_SIZE_MASK;
+
+ if (is_fs) {
+ tdes3 |= TDES3_FIRST_DESCRIPTOR |
+ TDES3_TCP_SEGMENTATION_ENABLE |
+ ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
+ TDES3_SLOT_NUMBER_MASK) |
+ ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
+ } else {
+ tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+ }
+
+ if (ls)
+ tdes3 |= TDES3_LAST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= TDES3_OWN;
+
+	if (is_fs && tx_own)
+		/* When the own bit has to be set for the first frame, all
+		 * descriptors for the same frame have to be written before,
+		 * to avoid a race condition.
+		 */
+		wmb();
+
+ p->des3 = tdes3;
+}
+
+static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
+{
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
+{
+ p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
+}
+
+static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
+{
+ struct dma_desc *p = (struct dma_desc *)head;
+ int i;
+
+ pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ if (p->des0)
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(p),
+ p->des0, p->des1, p->des2, p->des3);
+ p++;
+ }
+}
+
+static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = mss;
+ p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
+}
+
+const struct stmmac_desc_ops dwmac4_desc_ops = {
+ .tx_status = dwmac4_wrback_get_tx_status,
+ .rx_status = dwmac4_wrback_get_rx_status,
+ .get_tx_len = dwmac4_rd_get_tx_len,
+ .get_tx_owner = dwmac4_get_tx_owner,
+ .set_tx_owner = dwmac4_set_tx_owner,
+ .set_rx_owner = dwmac4_set_rx_owner,
+ .get_tx_ls = dwmac4_get_tx_ls,
+ .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
+ .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
+ .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
+ .get_timestamp = dwmac4_wrback_get_timestamp,
+ .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
+ .set_tx_ic = dwmac4_rd_set_tx_ic,
+ .prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
+ .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
+ .release_tx_desc = dwmac4_release_tx_desc,
+ .init_rx_desc = dwmac4_rd_init_rx_desc,
+ .init_tx_desc = dwmac4_rd_init_tx_desc,
+ .display_ring = dwmac4_display_ring,
+ .set_mss = dwmac4_set_mss_ctxt,
+};
+
+const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
new file mode 100644
index 000000000..0902a2ede
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -0,0 +1,129 @@
+/*
+ * Header File to describe the DMA descriptors and related definitions specific
+ * for DesignWare databook 4.xx.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DESCS_H__
+#define __DWMAC4_DESCS_H__
+
+#include <linux/bitops.h>
+
+/* Normal transmit descriptor defines (without split feature) */
+
+/* TDES2 (read format) */
+#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0)
+#define TDES2_VLAN_TAG_MASK GENMASK(15, 14)
+#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16)
+#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16
+#define TDES2_TIMESTAMP_ENABLE BIT(30)
+#define TDES2_INTERRUPT_ON_COMPLETION BIT(31)
+
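These TDES fields are plain mask/shift pairs. A short user-space sketch of
extracting the buffer 2 size, with 32-bit stand-ins for the kernel's
BIT()/GENMASK() macros; the sample des2 value is invented:

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1))

#define TDES2_BUFFER2_SIZE_MASK		GENMASK(29, 16)
#define TDES2_BUFFER2_SIZE_MASK_SHIFT	16

int main(void)
{
	/* 1000-byte buffer 2 packed next to a 64-byte buffer 1 size */
	uint32_t des2 = (1000u << TDES2_BUFFER2_SIZE_MASK_SHIFT) | 64u;
	uint32_t buf2 = (des2 & TDES2_BUFFER2_SIZE_MASK) >>
			TDES2_BUFFER2_SIZE_MASK_SHIFT;

	printf("buffer2 size = %u\n", buf2);	/* prints 1000 */
	return 0;
}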
+/* TDES3 (read format) */
+#define TDES3_PACKET_SIZE_MASK GENMASK(14, 0)
+#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16)
+#define TDES3_CHECKSUM_INSERTION_SHIFT 16
+#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0)
+#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18)
+#define TDES3_HDR_LEN_SHIFT 19
+#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19)
+#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23)
+#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26)
+
+/* TDES3 (write back format) */
+#define TDES3_IP_HDR_ERROR BIT(0)
+#define TDES3_DEFERRED BIT(1)
+#define TDES3_UNDERFLOW_ERROR BIT(2)
+#define TDES3_EXCESSIVE_DEFERRAL BIT(3)
+#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4)
+#define TDES3_COLLISION_COUNT_SHIFT 4
+#define TDES3_EXCESSIVE_COLLISION BIT(8)
+#define TDES3_LATE_COLLISION BIT(9)
+#define TDES3_NO_CARRIER BIT(10)
+#define TDES3_LOSS_CARRIER BIT(11)
+#define TDES3_PAYLOAD_ERROR BIT(12)
+#define TDES3_PACKET_FLUSHED BIT(13)
+#define TDES3_JABBER_TIMEOUT BIT(14)
+#define TDES3_ERROR_SUMMARY BIT(15)
+#define TDES3_TIMESTAMP_STATUS BIT(17)
+#define TDES3_TIMESTAMP_STATUS_SHIFT 17
+
+/* TDES3 context */
+#define TDES3_CTXT_TCMSSV BIT(26)
+
+/* TDES3 Common */
+#define TDES3_LAST_DESCRIPTOR BIT(28)
+#define TDES3_LAST_DESCRIPTOR_SHIFT 28
+#define TDES3_FIRST_DESCRIPTOR BIT(29)
+#define TDES3_CONTEXT_TYPE BIT(30)
+
+/* TDS3 use for both format (read and write back) */
+#define TDES3_OWN BIT(31)
+#define TDES3_OWN_SHIFT 31
+
+/* Normal receive descriptor defines (without split feature) */
+
+/* RDES0 (write back format) */
+#define RDES0_VLAN_TAG_MASK GENMASK(15, 0)
+
+/* RDES1 (write back format) */
+#define RDES1_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0)
+#define RDES1_IP_HDR_ERROR BIT(3)
+#define RDES1_IPV4_HEADER BIT(4)
+#define RDES1_IPV6_HEADER BIT(5)
+#define RDES1_IP_CSUM_BYPASSED BIT(6)
+#define RDES1_IP_CSUM_ERROR BIT(7)
+#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8)
+#define RDES1_PTP_PACKET_TYPE BIT(12)
+#define RDES1_PTP_VER BIT(13)
+#define RDES1_TIMESTAMP_AVAILABLE BIT(14)
+#define RDES1_TIMESTAMP_AVAILABLE_SHIFT 14
+#define RDES1_TIMESTAMP_DROPPED BIT(15)
+#define RDES1_IP_TYPE1_CSUM_MASK GENMASK(31, 16)
+
+/* RDES2 (write back format) */
+#define RDES2_L3_L4_HEADER_SIZE_MASK GENMASK(9, 0)
+#define RDES2_VLAN_FILTER_STATUS BIT(15)
+#define RDES2_SA_FILTER_FAIL BIT(16)
+#define RDES2_DA_FILTER_FAIL BIT(17)
+#define RDES2_HASH_FILTER_STATUS BIT(18)
+#define RDES2_MAC_ADDR_MATCH_MASK GENMASK(26, 19)
+#define RDES2_HASH_VALUE_MATCH_MASK GENMASK(26, 19)
+#define RDES2_L3_FILTER_MATCH BIT(27)
+#define RDES2_L4_FILTER_MATCH BIT(28)
+#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26)
+#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26
+
+/* RDES3 (write back format) */
+#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0)
+#define RDES3_ERROR_SUMMARY BIT(15)
+#define RDES3_PACKET_LEN_TYPE_MASK GENMASK(18, 16)
+#define RDES3_DRIBBLE_ERROR BIT(19)
+#define RDES3_RECEIVE_ERROR BIT(20)
+#define RDES3_OVERFLOW_ERROR BIT(21)
+#define RDES3_RECEIVE_WATCHDOG BIT(22)
+#define RDES3_GIANT_PACKET BIT(23)
+#define RDES3_CRC_ERROR BIT(24)
+#define RDES3_RDES0_VALID BIT(25)
+#define RDES3_RDES1_VALID BIT(26)
+#define RDES3_RDES2_VALID BIT(27)
+#define RDES3_LAST_DESCRIPTOR BIT(28)
+#define RDES3_FIRST_DESCRIPTOR BIT(29)
+#define RDES3_CONTEXT_DESCRIPTOR BIT(30)
+
+/* RDES3 (read format) */
+#define RDES3_BUFFER1_VALID_ADDR BIT(24)
+#define RDES3_BUFFER2_VALID_ADDR BIT(25)
+#define RDES3_INT_ON_COMPLETION_EN BIT(30)
+
+/* TDS3 use for both format (read and write back) */
+#define RDES3_OWN BIT(31)
+
+#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
new file mode 100644
index 000000000..116151cd6
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -0,0 +1,354 @@
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.xx has been used for developing this code.
+ *
+ * This contains the functions to handle the dma.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include "dwmac4.h"
+#include "dwmac4_dma.h"
+
+static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+ u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+ int i;
+
+ pr_info("dwmac4: Master AXI performs %s burst length\n",
+ (value & DMA_SYS_BUS_FB) ? "fixed" : "any");
+
+ if (axi->axi_lpi_en)
+ value |= DMA_AXI_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= DMA_AXI_LPI_XIT_FRM;
+
+ value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
+ DMA_AXI_WR_OSR_LMT_SHIFT;
+
+ value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
+ DMA_AXI_RD_OSR_LMT_SHIFT;
+
+	/* Depending on the UNDEF bit, the AXI master performs any burst
+	 * length allowed by the programmed BLEN bits (by default all BLEN
+	 * bits are set).
+	 */
+ for (i = 0; i < AXI_BLEN; i++) {
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= DMA_AXI_BLEN256;
+ break;
+ case 128:
+ value |= DMA_AXI_BLEN128;
+ break;
+ case 64:
+ value |= DMA_AXI_BLEN64;
+ break;
+ case 32:
+ value |= DMA_AXI_BLEN32;
+ break;
+ case 16:
+ value |= DMA_AXI_BLEN16;
+ break;
+ case 8:
+ value |= DMA_AXI_BLEN8;
+ break;
+ case 4:
+ value |= DMA_AXI_BLEN4;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + DMA_SYS_BUS_MODE);
+}
+
+static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
+ u32 dma_tx_phy, u32 dma_rx_phy,
+ u32 channel)
+{
+ u32 value;
+
+	/* Set the PBL for each channel. Currently the same configuration
+	 * is applied to every channel.
+	 */
+ value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
+ value = value | DMA_BUS_MODE_PBL;
+ writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
+
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+ value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
+
+ value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
+ value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
+
+	/* Enable the default DMA channel interrupts */
+ writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
+
+ writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
+ writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+}
+
+static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int aal, u32 dma_tx, u32 dma_rx, int atds)
+{
+ u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+ int i;
+
+ /* Set the Fixed burst mode */
+ if (fb)
+ value |= DMA_SYS_BUS_FB;
+
+ /* Mixed Burst has no effect when fb is set */
+ if (mb)
+ value |= DMA_SYS_BUS_MB;
+
+ if (aal)
+ value |= DMA_SYS_BUS_AAL;
+
+ writel(value, ioaddr + DMA_SYS_BUS_MODE);
+
+ for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+ dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
+}
+
+static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
+{
+ pr_debug(" Channel %d\n", channel);
+ pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
+ readl(ioaddr + DMA_CHAN_CONTROL(channel)));
+ pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
+ readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)));
+ pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
+ readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)));
+ pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
+ readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
+ readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
+ readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
+ readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
+ readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)));
+ pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
+ readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)));
+ pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
+ readl(ioaddr + DMA_CHAN_INTR_ENA(channel)));
+ pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
+ readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)));
+ pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
+ readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)));
+ pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
+ readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)));
+ pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
+ readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)));
+ pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
+ readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
+ readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
+ readl(ioaddr + DMA_CHAN_STATUS(channel)));
+}
+
+static void dwmac4_dump_dma_regs(void __iomem *ioaddr)
+{
+ int i;
+
+ pr_debug(" GMAC4 DMA registers\n");
+
+ for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+ _dwmac4_dump_dma_regs(ioaddr, i);
+}
+
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+ int i;
+
+ for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
+}
+
+static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
+ int rxmode, u32 channel)
+{
+ u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
+
+	/* The following is only done for channel 0; other channels are not
+	 * yet supported.
+	 */
+ mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ if (txmode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable TX store and forward mode\n");
+ /* Transmit COE type 2 cannot be done in cut-through mode. */
+ mtl_tx_op |= MTL_OP_MODE_TSF;
+ } else {
+ pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+ mtl_tx_op &= ~MTL_OP_MODE_TSF;
+		mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK;
+ /* Set the transmit threshold */
+ if (txmode <= 32)
+ mtl_tx_op |= MTL_OP_MODE_TTC_32;
+ else if (txmode <= 64)
+ mtl_tx_op |= MTL_OP_MODE_TTC_64;
+ else if (txmode <= 96)
+ mtl_tx_op |= MTL_OP_MODE_TTC_96;
+ else if (txmode <= 128)
+ mtl_tx_op |= MTL_OP_MODE_TTC_128;
+ else if (txmode <= 192)
+ mtl_tx_op |= MTL_OP_MODE_TTC_192;
+ else if (txmode <= 256)
+ mtl_tx_op |= MTL_OP_MODE_TTC_256;
+ else if (txmode <= 384)
+ mtl_tx_op |= MTL_OP_MODE_TTC_384;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TTC_512;
+ }
+
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+ if (rxmode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable RX store and forward mode\n");
+ mtl_rx_op |= MTL_OP_MODE_RSF;
+ } else {
+ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
+ mtl_rx_op &= ~MTL_OP_MODE_RSF;
+		mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK;
+ if (rxmode <= 32)
+ mtl_rx_op |= MTL_OP_MODE_RTC_32;
+ else if (rxmode <= 64)
+ mtl_rx_op |= MTL_OP_MODE_RTC_64;
+ else if (rxmode <= 96)
+ mtl_rx_op |= MTL_OP_MODE_RTC_96;
+ else
+ mtl_rx_op |= MTL_OP_MODE_RTC_128;
+ }
+
+ writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+ /* Enable MTL RX overflow */
+ mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
+ writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
+ ioaddr + MTL_CHAN_INT_CTRL(channel));
+}
+
+static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
+ int rxmode, int rxfifosz)
+{
+ /* Only Channel 0 is actually configured and used */
+ dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
+}
+
+static void dwmac4_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
+{
+ u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
+
+ /* MAC HW feature0 */
+ dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
+ dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
+ dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
+ dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
+ dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
+ dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
+ dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
+ dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
+ dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
+ /* MMC */
+ dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
+ /* IEEE 1588-2008 */
+ dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
+ /* TX and RX csum */
+ dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
+ dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
+
+ /* MAC HW feature1 */
+ hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+ dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
+ dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+ /* MAC HW feature2 */
+ hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
+ /* TX and RX number of channels */
+ dma_cap->number_rx_channel =
+ ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
+ dma_cap->number_tx_channel =
+ ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
+
+ /* IEEE 1588-2002 */
+ dma_cap->time_stamp = 0;
+}
+
+/* Enable/disable the TSO feature */
+static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value;
+
+ if (en) {
+ /* enable TSO */
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value | DMA_CONTROL_TSE,
+ ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ } else {
+		/* disable TSO */
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value & ~DMA_CONTROL_TSE,
+ ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ }
+}
+
+const struct stmmac_dma_ops dwmac4_dma_ops = {
+ .reset = dwmac4_dma_reset,
+ .init = dwmac4_dma_init,
+ .axi = dwmac4_dma_axi,
+ .dump_regs = dwmac4_dump_dma_regs,
+ .dma_mode = dwmac4_dma_operation_mode,
+ .enable_dma_irq = dwmac4_enable_dma_irq,
+ .disable_dma_irq = dwmac4_disable_dma_irq,
+ .start_tx = dwmac4_dma_start_tx,
+ .stop_tx = dwmac4_dma_stop_tx,
+ .start_rx = dwmac4_dma_start_rx,
+ .stop_rx = dwmac4_dma_stop_rx,
+ .dma_interrupt = dwmac4_dma_interrupt,
+ .get_hw_feature = dwmac4_get_hw_feature,
+ .rx_watchdog = dwmac4_rx_watchdog,
+ .set_rx_ring_len = dwmac4_set_rx_ring_len,
+ .set_tx_ring_len = dwmac4_set_tx_ring_len,
+ .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+ .enable_tso = dwmac4_enable_tso,
+};
+
+const struct stmmac_dma_ops dwmac410_dma_ops = {
+ .reset = dwmac4_dma_reset,
+ .init = dwmac4_dma_init,
+ .axi = dwmac4_dma_axi,
+ .dump_regs = dwmac4_dump_dma_regs,
+ .dma_mode = dwmac4_dma_operation_mode,
+ .enable_dma_irq = dwmac410_enable_dma_irq,
+ .disable_dma_irq = dwmac4_disable_dma_irq,
+ .start_tx = dwmac4_dma_start_tx,
+ .stop_tx = dwmac4_dma_stop_tx,
+ .start_rx = dwmac4_dma_start_rx,
+ .stop_rx = dwmac4_dma_stop_rx,
+ .dma_interrupt = dwmac4_dma_interrupt,
+ .get_hw_feature = dwmac4_get_hw_feature,
+ .rx_watchdog = dwmac4_rx_watchdog,
+ .set_rx_ring_len = dwmac4_set_rx_ring_len,
+ .set_tx_ring_len = dwmac4_set_tx_ring_len,
+ .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+ .enable_tso = dwmac4_enable_tso,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
new file mode 100644
index 000000000..1b06df749
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -0,0 +1,202 @@
+/*
+ * DWMAC4 DMA Header file.
+ *
+ * Copyright (C) 2007-2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DMA_H__
+#define __DWMAC4_DMA_H__
+
+/* Define the max channel number used for tx (also rx).
+ * dwmac4 accepts up to 8 channels for TX (and also 8 channels for RX).
+ */
+#define DMA_CHANNEL_NB_MAX 1
+
+#define DMA_BUS_MODE 0x00001000
+#define DMA_SYS_BUS_MODE 0x00001004
+#define DMA_STATUS 0x00001008
+#define DMA_DEBUG_STATUS_0 0x0000100c
+#define DMA_DEBUG_STATUS_1 0x00001010
+#define DMA_DEBUG_STATUS_2 0x00001014
+#define DMA_AXI_BUS_MODE 0x00001028
+
+/* DMA Bus Mode bitmap */
+#define DMA_BUS_MODE_SFT_RESET BIT(0)
+
+/* DMA SYS Bus Mode bitmap */
+#define DMA_BUS_MODE_SPH BIT(24)
+#define DMA_BUS_MODE_PBL BIT(16)
+#define DMA_BUS_MODE_PBL_SHIFT 16
+#define DMA_BUS_MODE_RPBL_SHIFT 16
+#define DMA_BUS_MODE_MB BIT(14)
+#define DMA_BUS_MODE_FB BIT(0)
+
+/* DMA Interrupt top status */
+#define DMA_STATUS_MAC BIT(17)
+#define DMA_STATUS_MTL BIT(16)
+#define DMA_STATUS_CHAN7 BIT(7)
+#define DMA_STATUS_CHAN6 BIT(6)
+#define DMA_STATUS_CHAN5 BIT(5)
+#define DMA_STATUS_CHAN4 BIT(4)
+#define DMA_STATUS_CHAN3 BIT(3)
+#define DMA_STATUS_CHAN2 BIT(2)
+#define DMA_STATUS_CHAN1 BIT(1)
+#define DMA_STATUS_CHAN0 BIT(0)
+
+/* DMA debug status bitmap */
+#define DMA_DEBUG_STATUS_TS_MASK 0xf
+#define DMA_DEBUG_STATUS_RS_MASK 0xf
+
+/* DMA AXI bitmap */
+#define DMA_AXI_EN_LPI BIT(31)
+#define DMA_AXI_LPI_XIT_FRM BIT(30)
+#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24)
+#define DMA_AXI_WR_OSR_LMT_SHIFT 24
+#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
+#define DMA_AXI_RD_OSR_LMT_SHIFT 16
+
+#define DMA_AXI_OSR_MAX 0xf
+#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
+ (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
+
+#define DMA_SYS_BUS_MB BIT(14)
+#define DMA_AXI_1KBBE BIT(13)
+#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_SYS_BUS_FB BIT(0)
+
+#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
+ DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
+ DMA_AXI_BLEN4)
+
+#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+
+/* The following DMA defines are channel-oriented */
+#define DMA_CHAN_BASE_ADDR 0x00001100
+#define DMA_CHAN_BASE_OFFSET 0x80
+#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \
+ (x * DMA_CHAN_BASE_OFFSET))
+#define DMA_CHAN_REG_NUMBER 17
+
+#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
+#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
+#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
+#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
+#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
+#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
+#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
+#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c)
+#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30)
+#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34)
+#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) + 0x38)
+#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c)
+#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44)
+#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c)
+#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54)
+#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c)
+#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
+
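Each DMA channel's registers sit at a fixed 0x80 stride above
DMA_CHAN_BASE_ADDR, so every per-channel define above is just base + stride *
channel + register offset. A small sketch of the address arithmetic, reusing
a few of the macros from this header (the loop bound is arbitrary):

#include <stdio.h>

#define DMA_CHAN_BASE_ADDR	0x00001100
#define DMA_CHAN_BASE_OFFSET	0x80
#define DMA_CHANX_BASE_ADDR(x)	(DMA_CHAN_BASE_ADDR + \
				 ((x) * DMA_CHAN_BASE_OFFSET))
#define DMA_CHAN_TX_CONTROL(x)	(DMA_CHANX_BASE_ADDR(x) + 0x4)
#define DMA_CHAN_STATUS(x)	(DMA_CHANX_BASE_ADDR(x) + 0x60)

int main(void)
{
	int ch;

	for (ch = 0; ch < 3; ch++)
		printf("chan %d: base 0x%x, tx ctrl 0x%x, status 0x%x\n",
		       ch, DMA_CHANX_BASE_ADDR(ch), DMA_CHAN_TX_CONTROL(ch),
		       DMA_CHAN_STATUS(ch));
	return 0;
}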
+/* DMA Control X */
+#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
+
+/* DMA Tx Channel X Control register defines */
+#define DMA_CONTROL_TSE BIT(12)
+#define DMA_CONTROL_OSP BIT(4)
+#define DMA_CONTROL_ST BIT(0)
+
+/* DMA Rx Channel X Control register defines */
+#define DMA_CONTROL_SR BIT(0)
+
+/* Interrupt status per channel */
+#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
+#define DMA_CHAN_STATUS_REB_SHIFT 19
+#define DMA_CHAN_STATUS_TEB GENMASK(18, 16)
+#define DMA_CHAN_STATUS_TEB_SHIFT 16
+#define DMA_CHAN_STATUS_NIS BIT(15)
+#define DMA_CHAN_STATUS_AIS BIT(14)
+#define DMA_CHAN_STATUS_CDE BIT(13)
+#define DMA_CHAN_STATUS_FBE BIT(12)
+#define DMA_CHAN_STATUS_ERI BIT(11)
+#define DMA_CHAN_STATUS_ETI BIT(10)
+#define DMA_CHAN_STATUS_RWT BIT(9)
+#define DMA_CHAN_STATUS_RPS BIT(8)
+#define DMA_CHAN_STATUS_RBU BIT(7)
+#define DMA_CHAN_STATUS_RI BIT(6)
+#define DMA_CHAN_STATUS_TBU BIT(2)
+#define DMA_CHAN_STATUS_TPS BIT(1)
+#define DMA_CHAN_STATUS_TI BIT(0)
+
+/* Interrupt enable bits per channel */
+#define DMA_CHAN_INTR_ENA_NIE BIT(16)
+#define DMA_CHAN_INTR_ENA_AIE BIT(15)
+#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15)
+#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14)
+#define DMA_CHAN_INTR_ENA_CDE BIT(13)
+#define DMA_CHAN_INTR_ENA_FBE BIT(12)
+#define DMA_CHAN_INTR_ENA_ERE BIT(11)
+#define DMA_CHAN_INTR_ENA_ETE BIT(10)
+#define DMA_CHAN_INTR_ENA_RWE BIT(9)
+#define DMA_CHAN_INTR_ENA_RSE BIT(8)
+#define DMA_CHAN_INTR_ENA_RBUE BIT(7)
+#define DMA_CHAN_INTR_ENA_RIE BIT(6)
+#define DMA_CHAN_INTR_ENA_TBUE BIT(2)
+#define DMA_CHAN_INTR_ENA_TSE BIT(1)
+#define DMA_CHAN_INTR_ENA_TIE BIT(0)
+
+#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.00 */
+#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
+ DMA_CHAN_INTR_ABNORMAL)
+
+#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.10a */
+#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
+ DMA_CHAN_INTR_ABNORMAL_4_10)
+
+/* channel 0 specific fields */
+#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
+#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12
+#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8)
+#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
+
+int dwmac4_dma_reset(void __iomem *ioaddr);
+void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr);
+void dwmac4_dma_start_tx(void __iomem *ioaddr);
+void dwmac4_dma_stop_tx(void __iomem *ioaddr);
+void dwmac4_dma_start_rx(void __iomem *ioaddr);
+void dwmac4_dma_stop_rx(void __iomem *ioaddr);
+int dwmac4_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
+void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+
+#endif /* __DWMAC4_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
new file mode 100644
index 000000000..c7326d5b2
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2007-2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "common.h"
+#include "dwmac4_dma.h"
+#include "dwmac4.h"
+
+int dwmac4_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ mdelay(10);
+ }
+
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+{
+ writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
+}
+
+void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+{
+ writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
+}
+
+void dwmac4_dma_start_tx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value |= GMAC_CONFIG_TE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_stop_tx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value &= ~GMAC_CONFIG_TE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_start_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value |= DMA_CONTROL_SR;
+
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value |= GMAC_CONFIG_RE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_stop_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value &= ~GMAC_CONFIG_RE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
+{
+ writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
+}
+
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
+{
+ writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
+}
+
+void dwmac4_enable_dma_irq(void __iomem *ioaddr)
+{
+ writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
+ DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+void dwmac410_enable_dma_irq(void __iomem *ioaddr)
+{
+ writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
+ ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+void dwmac4_disable_dma_irq(void __iomem *ioaddr)
+{
+ writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+int dwmac4_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
+
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RBU))
+ x->rx_buf_unav_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RPS))
+ x->rx_process_stopped_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
+ x->rx_watchdog_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_ETI))
+ x->tx_early_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) {
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & DMA_CHAN_STATUS_NIS)) {
+ x->normal_irq_n++;
+ if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
+ u32 value;
+
+ value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+ /* to schedule NAPI on real RIE event. */
+ if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
+ x->rx_early_irq++;
+ }
+
+	/* Clear the interrupt by writing a logic 1 to the chanX interrupt
+	 * status [21-0], except reserved bits [5-3]
+	 */
+ writel((intr_status & 0x3fffc7),
+ ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+
+ return ret;
+}
+
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+	/* For MAC Addr registers we have to set the Address Enable (AE)
+ * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+ * is RO.
+ */
+ data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT);
+ writel(data | GMAC_HI_REG_AE, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+}
+
+/* Enable/disable MAC RX/TX */
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ if (enable)
+ value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
+ else
+ value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);
+
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
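stmmac_dwmac4_set_mac_addr() and stmmac_dwmac4_get_mac_addr() are inverses:
the high register holds addr[5]:addr[4] in its low 16 bits, the low register
holds addr[3] down to addr[0]. A round-trip sketch of the packing with an
arbitrary sample address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t hi = (addr[5] << 8) | addr[4];
	uint32_t lo = (addr[3] << 24) | (addr[2] << 16) |
		      (addr[1] << 8) | addr[0];
	/* unpack again, byte by byte, as the get helper does */
	uint8_t out[6] = {
		lo & 0xff, (lo >> 8) & 0xff, (lo >> 16) & 0xff,
		(lo >> 24) & 0xff, hi & 0xff, (hi >> 8) & 0xff,
	};

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}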
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index cfb018c7c..38f19c99c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -411,6 +411,26 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
}
}
+static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
+{
+ struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ int i;
+
+ pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ u64 x;
+
+ x = *(u64 *)ep;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(ep),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des2, ep->basic.des3);
+ ep++;
+ }
+ pr_info("\n");
+}
+
const struct stmmac_desc_ops enh_desc_ops = {
.tx_status = enh_desc_get_tx_status,
.rx_status = enh_desc_get_rx_status,
@@ -430,4 +450,5 @@ const struct stmmac_desc_ops enh_desc_ops = {
.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
.get_timestamp = enh_desc_get_timestamp,
.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
+ .display_ring = enh_desc_display_ring,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 192c24913..38a1a5603 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -35,6 +35,10 @@
* current value.*/
#define MMC_CNTRL_PRESET 0x10
#define MMC_CNTRL_FULL_HALF_PRESET 0x20
+
+#define MMC_GMAC4_OFFSET 0x700
+#define MMC_GMAC3_X_OFFSET 0x100
+
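With MMC_GMAC4_OFFSET and MMC_GMAC3_X_OFFSET in place, the MMC counter
defines reworked in mmc_core.c below become relative to a per-core MMC base
rather than absolute, so one table serves both IP generations; the caller is
presumably expected to point ioaddr at the right base first. A sketch of the
resulting absolute offsets, with the counter offset taken from the rebased
defines below:

#include <stdio.h>

#define MMC_GMAC4_OFFSET	0x700
#define MMC_GMAC3_X_OFFSET	0x100
#define MMC_TX_FRAMECOUNT_GB	0x18	/* now relative to the MMC base */

int main(void)
{
	printf("GMAC3.x TX framecount: 0x%x\n",
	       MMC_GMAC3_X_OFFSET + MMC_TX_FRAMECOUNT_GB);
	printf("GMAC4   TX framecount: 0x%x\n",
	       MMC_GMAC4_OFFSET + MMC_TX_FRAMECOUNT_GB);
	return 0;
}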
struct stmmac_counters {
unsigned int mmc_tx_octetcount_gb;
unsigned int mmc_tx_framecount_gb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 3f20bb1fe..ce9aa7928 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -28,12 +28,12 @@
/* MAC Management Counters register offset */
-#define MMC_CNTRL 0x00000100 /* MMC Control */
-#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */
-#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
-#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
-#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
-#define MMC_DEFAULT_MASK 0xffffffff
+#define MMC_CNTRL 0x00 /* MMC Control */
+#define MMC_RX_INTR 0x04 /* MMC RX Interrupt */
+#define MMC_TX_INTR 0x08 /* MMC TX Interrupt */
+#define MMC_RX_INTR_MASK 0x0c /* MMC Interrupt Mask */
+#define MMC_TX_INTR_MASK 0x10 /* MMC Interrupt Mask */
+#define MMC_DEFAULT_MASK 0xffffffff
/* MMC TX counter registers */
@@ -41,115 +41,115 @@
* _GB register stands for good and bad frames
* _G is for good only.
*/
-#define MMC_TX_OCTETCOUNT_GB 0x00000114
-#define MMC_TX_FRAMECOUNT_GB 0x00000118
-#define MMC_TX_BROADCASTFRAME_G 0x0000011c
-#define MMC_TX_MULTICASTFRAME_G 0x00000120
-#define MMC_TX_64_OCTETS_GB 0x00000124
-#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128
-#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c
-#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130
-#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134
-#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138
-#define MMC_TX_UNICAST_GB 0x0000013c
-#define MMC_TX_MULTICAST_GB 0x00000140
-#define MMC_TX_BROADCAST_GB 0x00000144
-#define MMC_TX_UNDERFLOW_ERROR 0x00000148
-#define MMC_TX_SINGLECOL_G 0x0000014c
-#define MMC_TX_MULTICOL_G 0x00000150
-#define MMC_TX_DEFERRED 0x00000154
-#define MMC_TX_LATECOL 0x00000158
-#define MMC_TX_EXESSCOL 0x0000015c
-#define MMC_TX_CARRIER_ERROR 0x00000160
-#define MMC_TX_OCTETCOUNT_G 0x00000164
-#define MMC_TX_FRAMECOUNT_G 0x00000168
-#define MMC_TX_EXCESSDEF 0x0000016c
-#define MMC_TX_PAUSE_FRAME 0x00000170
-#define MMC_TX_VLAN_FRAME_G 0x00000174
+#define MMC_TX_OCTETCOUNT_GB 0x14
+#define MMC_TX_FRAMECOUNT_GB 0x18
+#define MMC_TX_BROADCASTFRAME_G 0x1c
+#define MMC_TX_MULTICASTFRAME_G 0x20
+#define MMC_TX_64_OCTETS_GB 0x24
+#define MMC_TX_65_TO_127_OCTETS_GB 0x28
+#define MMC_TX_128_TO_255_OCTETS_GB 0x2c
+#define MMC_TX_256_TO_511_OCTETS_GB 0x30
+#define MMC_TX_512_TO_1023_OCTETS_GB 0x34
+#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x38
+#define MMC_TX_UNICAST_GB 0x3c
+#define MMC_TX_MULTICAST_GB 0x40
+#define MMC_TX_BROADCAST_GB 0x44
+#define MMC_TX_UNDERFLOW_ERROR 0x48
+#define MMC_TX_SINGLECOL_G 0x4c
+#define MMC_TX_MULTICOL_G 0x50
+#define MMC_TX_DEFERRED 0x54
+#define MMC_TX_LATECOL 0x58
+#define MMC_TX_EXESSCOL 0x5c
+#define MMC_TX_CARRIER_ERROR 0x60
+#define MMC_TX_OCTETCOUNT_G 0x64
+#define MMC_TX_FRAMECOUNT_G 0x68
+#define MMC_TX_EXCESSDEF 0x6c
+#define MMC_TX_PAUSE_FRAME 0x70
+#define MMC_TX_VLAN_FRAME_G 0x74
/* MMC RX counter registers */
-#define MMC_RX_FRAMECOUNT_GB 0x00000180
-#define MMC_RX_OCTETCOUNT_GB 0x00000184
-#define MMC_RX_OCTETCOUNT_G 0x00000188
-#define MMC_RX_BROADCASTFRAME_G 0x0000018c
-#define MMC_RX_MULTICASTFRAME_G 0x00000190
-#define MMC_RX_CRC_ERROR 0x00000194
-#define MMC_RX_ALIGN_ERROR 0x00000198
-#define MMC_RX_RUN_ERROR 0x0000019C
-#define MMC_RX_JABBER_ERROR 0x000001A0
-#define MMC_RX_UNDERSIZE_G 0x000001A4
-#define MMC_RX_OVERSIZE_G 0x000001A8
-#define MMC_RX_64_OCTETS_GB 0x000001AC
-#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0
-#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4
-#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8
-#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc
-#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0
-#define MMC_RX_UNICAST_G 0x000001c4
-#define MMC_RX_LENGTH_ERROR 0x000001c8
-#define MMC_RX_AUTOFRANGETYPE 0x000001cc
-#define MMC_RX_PAUSE_FRAMES 0x000001d0
-#define MMC_RX_FIFO_OVERFLOW 0x000001d4
-#define MMC_RX_VLAN_FRAMES_GB 0x000001d8
-#define MMC_RX_WATCHDOG_ERROR 0x000001dc
+#define MMC_RX_FRAMECOUNT_GB 0x80
+#define MMC_RX_OCTETCOUNT_GB 0x84
+#define MMC_RX_OCTETCOUNT_G 0x88
+#define MMC_RX_BROADCASTFRAME_G 0x8c
+#define MMC_RX_MULTICASTFRAME_G 0x90
+#define MMC_RX_CRC_ERROR 0x94
+#define MMC_RX_ALIGN_ERROR 0x98
+#define MMC_RX_RUN_ERROR 0x9C
+#define MMC_RX_JABBER_ERROR 0xA0
+#define MMC_RX_UNDERSIZE_G 0xA4
+#define MMC_RX_OVERSIZE_G 0xA8
+#define MMC_RX_64_OCTETS_GB 0xAC
+#define MMC_RX_65_TO_127_OCTETS_GB 0xb0
+#define MMC_RX_128_TO_255_OCTETS_GB 0xb4
+#define MMC_RX_256_TO_511_OCTETS_GB 0xb8
+#define MMC_RX_512_TO_1023_OCTETS_GB 0xbc
+#define MMC_RX_1024_TO_MAX_OCTETS_GB 0xc0
+#define MMC_RX_UNICAST_G 0xc4
+#define MMC_RX_LENGTH_ERROR 0xc8
+#define MMC_RX_AUTOFRANGETYPE 0xcc
+#define MMC_RX_PAUSE_FRAMES 0xd0
+#define MMC_RX_FIFO_OVERFLOW 0xd4
+#define MMC_RX_VLAN_FRAMES_GB 0xd8
+#define MMC_RX_WATCHDOG_ERROR 0xdc
/* IPC*/
-#define MMC_RX_IPC_INTR_MASK 0x00000200
-#define MMC_RX_IPC_INTR 0x00000208
+#define MMC_RX_IPC_INTR_MASK 0x100
+#define MMC_RX_IPC_INTR 0x108
/* IPv4*/
-#define MMC_RX_IPV4_GD 0x00000210
-#define MMC_RX_IPV4_HDERR 0x00000214
-#define MMC_RX_IPV4_NOPAY 0x00000218
-#define MMC_RX_IPV4_FRAG 0x0000021C
-#define MMC_RX_IPV4_UDSBL 0x00000220
+#define MMC_RX_IPV4_GD 0x110
+#define MMC_RX_IPV4_HDERR 0x114
+#define MMC_RX_IPV4_NOPAY 0x118
+#define MMC_RX_IPV4_FRAG 0x11C
+#define MMC_RX_IPV4_UDSBL 0x120
-#define MMC_RX_IPV4_GD_OCTETS 0x00000250
-#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254
-#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258
-#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c
-#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260
+#define MMC_RX_IPV4_GD_OCTETS 0x150
+#define MMC_RX_IPV4_HDERR_OCTETS 0x154
+#define MMC_RX_IPV4_NOPAY_OCTETS 0x158
+#define MMC_RX_IPV4_FRAG_OCTETS 0x15c
+#define MMC_RX_IPV4_UDSBL_OCTETS 0x160
/* IPV6*/
-#define MMC_RX_IPV6_GD_OCTETS 0x00000264
-#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268
-#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c
+#define MMC_RX_IPV6_GD_OCTETS 0x164
+#define MMC_RX_IPV6_HDERR_OCTETS 0x168
+#define MMC_RX_IPV6_NOPAY_OCTETS 0x16c
-#define MMC_RX_IPV6_GD 0x00000224
-#define MMC_RX_IPV6_HDERR 0x00000228
-#define MMC_RX_IPV6_NOPAY 0x0000022c
+#define MMC_RX_IPV6_GD 0x124
+#define MMC_RX_IPV6_HDERR 0x128
+#define MMC_RX_IPV6_NOPAY 0x12c
/* Protocols*/
-#define MMC_RX_UDP_GD 0x00000230
-#define MMC_RX_UDP_ERR 0x00000234
-#define MMC_RX_TCP_GD 0x00000238
-#define MMC_RX_TCP_ERR 0x0000023c
-#define MMC_RX_ICMP_GD 0x00000240
-#define MMC_RX_ICMP_ERR 0x00000244
+#define MMC_RX_UDP_GD 0x130
+#define MMC_RX_UDP_ERR 0x134
+#define MMC_RX_TCP_GD 0x138
+#define MMC_RX_TCP_ERR 0x13c
+#define MMC_RX_ICMP_GD 0x140
+#define MMC_RX_ICMP_ERR 0x144
-#define MMC_RX_UDP_GD_OCTETS 0x00000270
-#define MMC_RX_UDP_ERR_OCTETS 0x00000274
-#define MMC_RX_TCP_GD_OCTETS 0x00000278
-#define MMC_RX_TCP_ERR_OCTETS 0x0000027c
-#define MMC_RX_ICMP_GD_OCTETS 0x00000280
-#define MMC_RX_ICMP_ERR_OCTETS 0x00000284
+#define MMC_RX_UDP_GD_OCTETS 0x170
+#define MMC_RX_UDP_ERR_OCTETS 0x174
+#define MMC_RX_TCP_GD_OCTETS 0x178
+#define MMC_RX_TCP_ERR_OCTETS 0x17c
+#define MMC_RX_ICMP_GD_OCTETS 0x180
+#define MMC_RX_ICMP_ERR_OCTETS 0x184
-void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
+void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
- u32 value = readl(ioaddr + MMC_CNTRL);
+ u32 value = readl(mmcaddr + MMC_CNTRL);
value |= (mode & 0x3F);
- writel(value, ioaddr + MMC_CNTRL);
+ writel(value, mmcaddr + MMC_CNTRL);
pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
MMC_CNTRL, value);
}
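
A typical control word for dwmac_mmc_ctrl() is built from the MMC_CNTRL_* bits, as stmmac_mmc_setup() does further down in this patch; a minimal sketch (mmcaddr as derived there):

	/* Sketch: reset-on-read counters, preset to full/half values */
	unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
			    MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;

	dwmac_mmc_ctrl(mmcaddr, mode);
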
/* To mask all interrupts. */
-void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
+void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
}
/* This reads the MAC core counters (if actually supported).
@@ -157,111 +157,116 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 * counter after a read. So all the fields of the mmc struct
* have to be incremented.
*/
-void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
+void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
{
- mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB);
- mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB);
- mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G);
- mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G);
- mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB);
+ mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB);
+ mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB);
+ mmc->mmc_tx_broadcastframe_g += readl(mmcaddr +
+ MMC_TX_BROADCASTFRAME_G);
+ mmc->mmc_tx_multicastframe_g += readl(mmcaddr +
+ MMC_TX_MULTICASTFRAME_G);
+ mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB);
mmc->mmc_tx_65_to_127_octets_gb +=
- readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB);
mmc->mmc_tx_128_to_255_octets_gb +=
- readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB);
mmc->mmc_tx_256_to_511_octets_gb +=
- readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB);
mmc->mmc_tx_512_to_1023_octets_gb +=
- readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB);
mmc->mmc_tx_1024_to_max_octets_gb +=
- readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
- mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB);
- mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB);
- mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB);
- mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR);
- mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G);
- mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G);
- mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED);
- mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL);
- mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL);
- mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR);
- mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G);
- mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G);
- mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF);
- mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME);
- mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G);
+ readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB);
+ mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB);
+ mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB);
+ mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR);
+ mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G);
+ mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G);
+ mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED);
+ mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL);
+ mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL);
+ mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR);
+ mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G);
+ mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G);
+ mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
+ mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
+ mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
/* MMC RX counter registers */
- mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB);
- mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB);
- mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
- mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
- mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
- mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR);
- mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
- mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
- mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
- mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G);
- mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G);
- mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB);
+ mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
+ mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB);
+ mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G);
+ mmc->mmc_rx_broadcastframe_g += readl(mmcaddr +
+ MMC_RX_BROADCASTFRAME_G);
+ mmc->mmc_rx_multicastframe_g += readl(mmcaddr +
+ MMC_RX_MULTICASTFRAME_G);
+ mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR);
+ mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR);
+ mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR);
+ mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR);
+ mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G);
+ mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G);
+ mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB);
mmc->mmc_rx_65_to_127_octets_gb +=
- readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB);
mmc->mmc_rx_128_to_255_octets_gb +=
- readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB);
mmc->mmc_rx_256_to_511_octets_gb +=
- readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB);
mmc->mmc_rx_512_to_1023_octets_gb +=
- readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB);
mmc->mmc_rx_1024_to_max_octets_gb +=
- readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
- mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G);
- mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR);
- mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE);
- mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES);
- mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW);
- mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB);
- mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR);
+ readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G);
+ mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR);
+ mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE);
+ mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES);
+ mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
+ mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
+ mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
/* IPC */
- mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK);
- mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR);
+ mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
+ mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
/* IPv4 */
- mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD);
- mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR);
- mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY);
- mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG);
- mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL);
+ mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
+ mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
+ mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY);
+ mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG);
+ mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL);
- mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS);
+ mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS);
mmc->mmc_rx_ipv4_hderr_octets +=
- readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS);
mmc->mmc_rx_ipv4_nopay_octets +=
- readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS);
- mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS);
+ mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr +
+ MMC_RX_IPV4_FRAG_OCTETS);
mmc->mmc_rx_ipv4_udsbl_octets +=
- readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS);
/* IPV6 */
- mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS);
+ mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS);
mmc->mmc_rx_ipv6_hderr_octets +=
- readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS);
mmc->mmc_rx_ipv6_nopay_octets +=
- readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS);
- mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD);
- mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR);
- mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY);
+ mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD);
+ mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR);
+ mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY);
/* Protocols */
- mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD);
- mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR);
- mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD);
- mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR);
- mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD);
- mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR);
+ mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD);
+ mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR);
+ mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD);
+ mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR);
+ mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD);
+ mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR);
- mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS);
- mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS);
- mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS);
- mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS);
- mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS);
- mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS);
+ mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS);
+ mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS);
+ mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS);
+ mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS);
+ mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
+ mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
}
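
The register offsets above become base-relative because GMAC4 maps the MMC block at a different offset than the GMAC3.x cores; the caller now resolves the base once, as stmmac_mmc_setup() does later in this patch. A sketch of that derivation as a hypothetical helper (MMC_GMAC4_OFFSET and MMC_GMAC3_X_OFFSET are defined elsewhere in this series):

static void __iomem *stmmac_mmc_base(struct stmmac_priv *priv)
{
	/* GMAC4 and newer cores place the MMC block elsewhere */
	if (priv->synopsys_id >= DWMAC_CORE_4_00)
		return priv->ioaddr + MMC_GMAC4_OFFSET;

	return priv->ioaddr + MMC_GMAC3_X_OFFSET;
}
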
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 011386f6f..2beacd0d3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -279,6 +279,26 @@ static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
return 1;
}
+static void ndesc_display_ring(void *head, unsigned int size, bool rx)
+{
+ struct dma_desc *p = (struct dma_desc *)head;
+ int i;
+
+ pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ u64 x;
+
+ x = *(u64 *)p;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
+ i, (unsigned int)virt_to_phys(p),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ p->des2, p->des3);
+ p++;
+ }
+ pr_info("\n");
+}
+
const struct stmmac_desc_ops ndesc_ops = {
.tx_status = ndesc_get_tx_status,
.rx_status = ndesc_get_rx_status,
@@ -297,4 +317,5 @@ const struct stmmac_desc_ops ndesc_ops = {
.get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
.get_timestamp = ndesc_get_timestamp,
.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
+ .display_ring = ndesc_display_ring,
};
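
With display_ring now part of stmmac_desc_ops, callers stop hardcoding the descriptor flavor and dispatch through the ops table instead; a hypothetical helper showing the call shape (stmmac_display_rings() below does the same):

static void show_rx_ring(struct stmmac_priv *priv)
{
	/* true selects the RX banner in the ring dump */
	priv->hw->desc->display_ring((void *)priv->dma_rx, DMA_RX_SIZE, true);
}
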
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 8bbab9789..59ae6088c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,7 +24,7 @@
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define DRV_MODULE_VERSION "Oct_2015"
+#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
#include <linux/stmmac.h>
@@ -67,6 +67,7 @@ struct stmmac_priv {
spinlock_t tx_lock;
bool tx_path_in_lpi_mode;
struct timer_list txtimer;
+ bool tso;
struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
struct dma_extended_desc *dma_erx;
@@ -128,6 +129,10 @@ struct stmmac_priv {
int use_riwt;
int irq_wake;
spinlock_t ptp_lock;
+ void __iomem *mmcaddr;
+ u32 rx_tail_addr;
+ u32 tx_tail_addr;
+ u32 mss;
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
@@ -143,9 +148,9 @@ void stmmac_set_ethtool_ops(struct net_device *netdev);
int stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_resume(struct net_device *ndev);
-int stmmac_suspend(struct net_device *ndev);
-int stmmac_dvr_remove(struct net_device *ndev);
+int stmmac_resume(struct device *dev);
+int stmmac_suspend(struct device *dev);
+int stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
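
Switching suspend/resume/remove to struct device lets both bus front ends reuse the core callbacks directly instead of keeping thin wrappers; the PCI conversion below drops its wrapper functions entirely. A sketch of the resulting pattern:

/* Bus glue reduces to direct registration; the core fetches the
 * net_device back with dev_get_drvdata(), as shown below.
 */
static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
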
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 3c7928edf..e2b98b016 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -161,6 +161,9 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(mtl_rx_fifo_ctrl_active),
STMMAC_STAT(mac_rx_frame_ctrl_fifo),
STMMAC_STAT(mac_gmii_rx_proto_engine),
+ /* TSO */
+ STMMAC_STAT(tx_tso_frames),
+ STMMAC_STAT(tx_tso_nfrags),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
@@ -499,14 +502,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
int i, j = 0;
/* Update the DMA HW counters for dwmac10/100 */
- if (!priv->plat->has_gmac)
+ if (priv->hw->dma->dma_diagnostic_fr)
priv->hw->dma->dma_diagnostic_fr(&dev->stats,
(void *) &priv->xstats,
priv->ioaddr);
else {
/* If supported, for new GMAC chips expose the MMC counters */
if (priv->dma_cap.rmon) {
- dwmac_mmc_read(priv->ioaddr, &priv->mmc);
+ dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
char *p;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fcbd4be56..e4071265b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -56,6 +56,7 @@
#include "dwmac1000.h"
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
+#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
#define TX_TIMEO 5000
@@ -721,13 +722,15 @@ static void stmmac_adjust_link(struct net_device *dev)
new_state = 1;
switch (phydev->speed) {
case 1000:
- if (likely(priv->plat->has_gmac))
+ if (likely((priv->plat->has_gmac) ||
+ (priv->plat->has_gmac4)))
ctrl &= ~priv->hw->link.port;
stmmac_hw_fix_mac_speed(priv);
break;
case 100:
case 10:
- if (priv->plat->has_gmac) {
+ if (likely((priv->plat->has_gmac) ||
+ (priv->plat->has_gmac4))) {
ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
ctrl |= priv->hw->link.speed;
@@ -875,53 +878,22 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-/**
- * stmmac_display_ring - display ring
- * @head: pointer to the head of the ring passed.
- * @size: size of the ring.
- * @extend_desc: to verify if extended descriptors are used.
- * Description: display the control/status and buffer descriptors.
- */
-static void stmmac_display_ring(void *head, int size, int extend_desc)
-{
- int i;
- struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
- struct dma_desc *p = (struct dma_desc *)head;
-
- for (i = 0; i < size; i++) {
- u64 x;
- if (extend_desc) {
- x = *(u64 *) ep;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
- i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
- ep->basic.des2, ep->basic.des3);
- ep++;
- } else {
- x = *(u64 *) p;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
- i, (unsigned int)virt_to_phys(p),
- (unsigned int)x, (unsigned int)(x >> 32),
- p->des2, p->des3);
- p++;
- }
- pr_info("\n");
- }
-}
-
static void stmmac_display_rings(struct stmmac_priv *priv)
{
+ void *head_rx, *head_tx;
+
if (priv->extend_desc) {
- pr_info("Extended RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1);
- pr_info("Extended TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
+ head_rx = (void *)priv->dma_erx;
+ head_tx = (void *)priv->dma_etx;
} else {
- pr_info("RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0);
- pr_info("TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
+ head_rx = (void *)priv->dma_rx;
+ head_tx = (void *)priv->dma_tx;
}
+
+ /* Display Rx ring */
+ priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+ /* Display Tx ring */
+ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1000,7 +972,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return -EINVAL;
}
- p->des2 = priv->rx_skbuff_dma[i];
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ p->des0 = priv->rx_skbuff_dma[i];
+ else
+ p->des2 = priv->rx_skbuff_dma[i];
if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -1091,7 +1066,16 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
p = &((priv->dma_etx + i)->basic);
else
p = priv->dma_tx + i;
- p->des2 = 0;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+ } else {
+ p->des2 = 0;
+ }
+
priv->tx_skbuff_dma[i].buf = 0;
priv->tx_skbuff_dma[i].map_as_page = false;
priv->tx_skbuff_dma[i].len = 0;
@@ -1354,9 +1338,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry].buf = 0;
+ priv->tx_skbuff_dma[entry].len = 0;
priv->tx_skbuff_dma[entry].map_as_page = false;
}
- priv->hw->mode->clean_desc3(priv, p);
+
+ if (priv->hw->mode->clean_desc3)
+ priv->hw->mode->clean_desc3(priv, p);
+
priv->tx_skbuff_dma[entry].last_segment = false;
priv->tx_skbuff_dma[entry].is_jumbo = false;
@@ -1479,41 +1467,23 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
- MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
+ else
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
- dwmac_mmc_intr_all_mask(priv->ioaddr);
+ dwmac_mmc_intr_all_mask(priv->mmcaddr);
if (priv->dma_cap.rmon) {
- dwmac_mmc_ctrl(priv->ioaddr, mode);
+ dwmac_mmc_ctrl(priv->mmcaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else
pr_info(" No MAC Management Counters available\n");
}
/**
- * stmmac_get_synopsys_id - return the SYINID.
- * @priv: driver private structure
- * Description: this simple function is to decode and return the SYINID
- * starting from the HW core register.
- */
-static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
-{
- u32 hwid = priv->hw->synopsys_uid;
-
- /* Check Synopsys Id (not available on old chips) */
- if (likely(hwid)) {
- u32 uid = ((hwid & 0x0000ff00) >> 8);
- u32 synid = (hwid & 0x000000ff);
-
- pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
- uid, synid);
-
- return synid;
- }
- return 0;
-}
-
-/**
* stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
* @priv: driver private structure
* Description: select the Enhanced/Alternate or Normal descriptors.
@@ -1550,51 +1520,15 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
*/
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
- u32 hw_cap = 0;
+ u32 ret = 0;
if (priv->hw->dma->get_hw_feature) {
- hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
-
- priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
- priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
- priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
- priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
- priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
- priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
- priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
- priv->dma_cap.pmt_remote_wake_up =
- (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
- priv->dma_cap.pmt_magic_frame =
- (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
- /* MMC */
- priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
- /* IEEE 1588-2002 */
- priv->dma_cap.time_stamp =
- (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
- /* IEEE 1588-2008 */
- priv->dma_cap.atime_stamp =
- (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
- /* 802.3az - Energy-Efficient Ethernet (EEE) */
- priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
- priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
- /* TX and RX csum */
- priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
- priv->dma_cap.rx_coe_type1 =
- (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
- priv->dma_cap.rx_coe_type2 =
- (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
- priv->dma_cap.rxfifo_over_2048 =
- (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
- /* TX and RX number of channels */
- priv->dma_cap.number_rx_channel =
- (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
- priv->dma_cap.number_tx_channel =
- (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
- /* Alternate (enhanced) DESC mode */
- priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
- }
-
- return hw_cap;
+ priv->hw->dma->get_hw_feature(priv->ioaddr,
+ &priv->dma_cap);
+ ret = 1;
+ }
+
+ return ret;
}
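
The bit-by-bit decoding moves behind the get_hw_feature callback so each core variant can fill struct dma_features its own way; a sketch of what a dwmac1000-style implementation could look like, with register and bit names assumed from the removed code:

static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
				     struct dma_features *dma_cap)
{
	u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);

	dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
	dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
	/* ... remaining fields decoded exactly as the removed code did ... */
	dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
}
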
/**
@@ -1650,8 +1584,19 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
- if ((priv->synopsys_id >= DWMAC_CORE_3_50) &&
- (priv->plat->axi && priv->hw->dma->axi))
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->rx_tail_addr = priv->dma_rx_phy +
+ (DMA_RX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
+ STMMAC_CHAN0);
+
+ priv->tx_tail_addr = priv->dma_tx_phy +
+ (DMA_TX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
+ }
+
+ if (priv->plat->axi && priv->hw->dma->axi)
priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
return ret;
@@ -1731,7 +1676,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Enable the MAC Rx/Tx */
- stmmac_set_mac(priv->ioaddr, true);
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ stmmac_dwmac4_set_mac(priv->ioaddr, true);
+ else
+ stmmac_set_mac(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -1769,6 +1717,18 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->pcs && priv->hw->mac->ctrl_ane)
priv->hw->mac->ctrl_ane(priv->hw, 0);
+ /* set TX ring length */
+ if (priv->hw->dma->set_tx_ring_len)
+ priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+ (DMA_TX_SIZE - 1));
+ /* set RX ring length */
+ if (priv->hw->dma->set_rx_ring_len)
+ priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+ (DMA_RX_SIZE - 1));
+ /* Enable TSO */
+ if (priv->tso)
+ priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
+
return 0;
}
@@ -1934,6 +1894,239 @@ static int stmmac_release(struct net_device *dev)
}
/**
+ * stmmac_tso_allocator - allocate and fill TX descriptors for a TSO payload
+ * @priv: driver private structure
+ * @des: buffer start address
+ * @total_len: total length to fill in descriptors
+ * @last_segment: condition for the last descriptor
+ * Description:
+ * This function fills the current descriptor and requests new descriptors as
+ * needed to cover the given buffer length.
+ */
+static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+ int total_len, bool last_segment)
+{
+ struct dma_desc *desc;
+ int tmp_len;
+ u32 buff_size;
+
+ tmp_len = total_len;
+
+ while (tmp_len > 0) {
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ desc = priv->dma_tx + priv->cur_tx;
+
+ desc->des0 = des + (total_len - tmp_len);
+ buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+ TSO_MAX_BUFF_SIZE : tmp_len;
+
+ priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
+ 0, 1,
+ (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+ 0, 0);
+
+ tmp_len -= TSO_MAX_BUFF_SIZE;
+ }
+}
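
Each loop iteration above consumes one descriptor covering at most TSO_MAX_BUFF_SIZE bytes, which is also the arithmetic behind the availability check in stmmac_tso_xmit() below; as a hypothetical helper:

static inline int stmmac_tso_desc_count(int total_len)
{
	/* one descriptor per TSO_MAX_BUFF_SIZE chunk, rounded up */
	return DIV_ROUND_UP(total_len, TSO_MAX_BUFF_SIZE);
}
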
+
+/**
+ * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description: this is the transmit function that is called on TSO frames
+ * (support available on GMAC4 and newer chips).
+ * Diagram below shows the ring programming in case of TSO frames:
+ *
+ * First Descriptor
+ * --------
+ * | DES0 |---> buffer1 = L2/L3/L4 header
+ * | DES1 |---> TCP Payload (can continue on next descr...)
+ * | DES2 |---> buffer 1 and 2 len
+ * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
+ * --------
+ * |
+ * ...
+ * |
+ * --------
+ * | DES0 | --| Split TCP Payload on Buffers 1 and 2
+ * | DES1 | --|
+ * | DES2 | --> buffer 1 and 2 len
+ * | DES3 |
+ * --------
+ *
+ * The MSS is fixed while TSO is enabled, so the TDES3 ctx field need not be reprogrammed.
+ */
+static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 pay_len, mss;
+ int tmp_pay_len = 0;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int first_entry, des;
+ struct dma_desc *desc, *first, *mss_desc = NULL;
+ u8 proto_hdr_len;
+ int i;
+
+ spin_lock(&priv->tx_lock);
+
+ /* Compute header lengths */
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ /* Desc availability based on threshold should be safe enough */
+ if (unlikely(stmmac_tx_avail(priv) <
+ (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ /* This is a hard error, log it. */
+ pr_err("%s: Tx Ring full when queue awake\n", __func__);
+ }
+ spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
+
+ mss = skb_shinfo(skb)->gso_size;
+
+ /* set new MSS value if needed */
+ if (mss != priv->mss) {
+ mss_desc = priv->dma_tx + priv->cur_tx;
+ priv->hw->desc->set_mss(mss_desc, mss);
+ priv->mss = mss;
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ }
+
+ if (netif_msg_tx_queued(priv)) {
+ pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+ __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
+ pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
+ skb->data_len);
+ }
+
+ first_entry = priv->cur_tx;
+
+ desc = priv->dma_tx + first_entry;
+ first = desc;
+
+ /* first descriptor: fill Headers on Buf1 */
+ des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ priv->tx_skbuff_dma[first_entry].buf = des;
+ priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+ priv->tx_skbuff[first_entry] = skb;
+
+ first->des0 = des;
+
+ /* Fill start of payload in buff2 of first descriptor */
+ if (pay_len)
+ first->des1 = des + proto_hdr_len;
+
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+
+ stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
+
+ /* Prepare fragments */
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ des = skb_frag_dma_map(priv->device, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ stmmac_tso_allocator(priv, des, skb_frag_size(frag),
+ (i == nfrags - 1));
+
+ priv->tx_skbuff_dma[priv->cur_tx].buf = des;
+ priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
+ priv->tx_skbuff[priv->cur_tx] = NULL;
+ priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
+ }
+
+ priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
+
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+
+ if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ if (netif_msg_hw(priv))
+ pr_debug("%s: stop transmitted packets\n", __func__);
+ netif_stop_queue(dev);
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ priv->xstats.tx_tso_frames++;
+ priv->xstats.tx_tso_nfrags += nfrags;
+
+ /* Manage tx mitigation */
+ priv->tx_count_frames += nfrags + 1;
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ mod_timer(&priv->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ } else {
+ priv->tx_count_frames = 0;
+ priv->hw->desc->set_tx_ic(desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ if (!priv->hwts_tx_en)
+ skb_tx_timestamp(skb);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->enable_tx_timestamp(first);
+ }
+
+ /* Complete the first descriptor before granting the DMA */
+ priv->hw->desc->prepare_tso_tx_desc(first, 1,
+ proto_hdr_len,
+ pay_len,
+ 1, priv->tx_skbuff_dma[first_entry].last_segment,
+ tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+
+ /* If context desc is used to change MSS */
+ if (mss_desc)
+ priv->hw->desc->set_tx_owner(mss_desc);
+
+ /* Setting the own bit must be the last step done when preparing the
+ * descriptor, and a barrier is then needed to make sure that
+ * everything is coherent before granting the DMA engine.
+ */
+ smp_wmb();
+
+ if (netif_msg_pktdata(priv)) {
+ pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+ __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+ priv->cur_tx, first, nfrags);
+
+ priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
+ 0);
+
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb_headlen(skb));
+ }
+
+ netdev_sent_queue(dev, skb->len);
+
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
+
+ spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ spin_unlock(&priv->tx_lock);
+ dev_err(priv->device, "Tx dma map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
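
Note the MSS handling above: a context descriptor is emitted only when the MSS differs from the last programmed value, and stmmac_resume() below zeroes priv->mss so the first TSO frame after resume always reprograms it. A hypothetical predicate making that explicit:

static bool stmmac_tso_new_mss(struct stmmac_priv *priv, struct sk_buff *skb)
{
	/* true when a new MSS context descriptor must precede the frame */
	return skb_shinfo(skb)->gso_size != priv->mss;
}
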
+
+/**
* stmmac_xmit - Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
@@ -1950,6 +2143,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int entry, first_entry;
struct dma_desc *desc, *first;
unsigned int enh_desc;
+ unsigned int des;
+
+ /* Manage oversized TCP frames for GMAC4 device */
+ if (skb_is_gso(skb) && priv->tso) {
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ return stmmac_tso_xmit(skb, dev);
+ }
spin_lock(&priv->tx_lock);
@@ -1985,7 +2185,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (enh_desc)
is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
- if (unlikely(is_jumbo)) {
+ if (unlikely(is_jumbo) && likely(priv->synopsys_id <
+ DWMAC_CORE_4_00)) {
entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
if (unlikely(entry < 0))
goto dma_map_err;
@@ -2003,13 +2204,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
else
desc = priv->dma_tx + entry;
- desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
+ des = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
goto dma_map_err; /* should reuse desc w/o issues */
priv->tx_skbuff[entry] = NULL;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ desc->des0 = des;
+ priv->tx_skbuff_dma[entry].buf = desc->des0;
+ } else {
+ desc->des2 = des;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
+ }
+
priv->tx_skbuff_dma[entry].map_as_page = true;
priv->tx_skbuff_dma[entry].len = len;
priv->tx_skbuff_dma[entry].last_segment = last_segment;
@@ -2024,16 +2233,18 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
+ void *tx_head;
+
pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
__func__, priv->cur_tx, priv->dirty_tx, first_entry,
entry, first, nfrags);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_etx,
- DMA_TX_SIZE, 1);
+ tx_head = (void *)priv->dma_etx;
else
- stmmac_display_ring((void *)priv->dma_tx,
- DMA_TX_SIZE, 0);
+ tx_head = (void *)priv->dma_tx;
+
+ priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
pr_debug(">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
@@ -2072,12 +2283,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!is_jumbo)) {
bool last_segment = (nfrags == 0);
- first->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, first->des2))
+ des = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- priv->tx_skbuff_dma[first_entry].buf = first->des2;
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ first->des0 = des;
+ priv->tx_skbuff_dma[first_entry].buf = first->des0;
+ } else {
+ first->des2 = des;
+ priv->tx_skbuff_dma[first_entry].buf = first->des2;
+ }
+
priv->tx_skbuff_dma[first_entry].len = nopaged_len;
priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
@@ -2101,7 +2319,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}
netdev_sent_queue(dev, skb->len);
- priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+ else
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
spin_unlock(&priv->tx_lock);
return NETDEV_TX_OK;
@@ -2183,9 +2406,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
dev_kfree_skb(skb);
break;
}
- p->des2 = priv->rx_skbuff_dma[entry];
- priv->hw->mode->refill_desc3(priv, p);
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ p->des0 = priv->rx_skbuff_dma[entry];
+ p->des1 = 0;
+ } else {
+ p->des2 = priv->rx_skbuff_dma[entry];
+ }
+ if (priv->hw->mode->refill_desc3)
+ priv->hw->mode->refill_desc3(priv, p);
if (priv->rx_zeroc_thresh > 0)
priv->rx_zeroc_thresh--;
@@ -2193,9 +2422,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
if (netif_msg_rx_status(priv))
pr_debug("\trefill entry #%d\n", entry);
}
-
wmb();
- priv->hw->desc->set_rx_owner(p);
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+ priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
+ else
+ priv->hw->desc->set_rx_owner(p);
+
wmb();
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
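
The des0/des2 split recurs throughout this patch: GMAC4 descriptors carry the buffer address in des0 (with des1 cleared), older cores in des2. A hypothetical helper capturing the repeated pattern:

static inline void stmmac_desc_set_addr(struct stmmac_priv *priv,
					struct dma_desc *p, unsigned int addr)
{
	if (priv->synopsys_id >= DWMAC_CORE_4_00) {
		p->des0 = addr;
		p->des1 = 0;
	} else {
		p->des2 = addr;
	}
}
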
@@ -2218,13 +2451,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
int coe = priv->hw->rx_csum;
if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
pr_debug("%s: descriptor ring:\n", __func__);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_erx,
- DMA_RX_SIZE, 1);
+ rx_head = (void *)priv->dma_erx;
else
- stmmac_display_ring((void *)priv->dma_rx,
- DMA_RX_SIZE, 0);
+ rx_head = (void *)priv->dma_rx;
+
+ priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
int status;
@@ -2274,11 +2509,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
} else {
struct sk_buff *skb;
int frame_len;
+ unsigned int des;
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+ des = p->des0;
+ else
+ des = p->des2;
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
- /* check if frame_len fits the preallocated memory */
+ /* If frame length is greater than skb buffer size
+ * (preallocated during init) then the packet is
+ * ignored
+ */
if (frame_len > priv->dma_buf_sz) {
+ pr_err("%s: len %d larger than size (%d)\n",
+ priv->dev->name, frame_len,
+ priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
break;
}
@@ -2291,14 +2538,19 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (netif_msg_rx_status(priv)) {
pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
- p, entry, p->des2);
+ p, entry, des);
if (frame_len > ETH_FRAME_LEN)
pr_debug("\tframe size %d, COE: %d\n",
frame_len, status);
}
- if (unlikely((frame_len < priv->rx_copybreak) ||
- stmmac_rx_threshold_count(priv))) {
+ /* Zero-copy is always used on GMAC4, whatever the
+ * frame size, because the used descriptors always
+ * need to be refilled.
+ */
+ if (unlikely(!priv->plat->has_gmac4 &&
+ ((frame_len < priv->rx_copybreak) ||
+ stmmac_rx_threshold_count(priv)))) {
skb = netdev_alloc_skb_ip_align(priv->dev,
frame_len);
if (unlikely(!skb)) {
@@ -2450,7 +2702,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- if (priv->plat->enh_desc)
+ if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
max_mtu = JUMBO_LEN;
else
max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
@@ -2464,6 +2716,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
}
dev->mtu = new_mtu;
+
netdev_update_features(dev);
return 0;
@@ -2488,6 +2741,14 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_CSUM_MASK;
+ /* Disable tso if asked by ethtool */
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if (features & NETIF_F_TSO)
+ priv->tso = true;
+ else
+ priv->tso = false;
+ }
+
return features;
}
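
Combined with the probe-time check further down, TSO ends up active only when the platform opts in, the capability register reports it, and ethtool has not cleared NETIF_F_TSO; as a hypothetical predicate:

static bool stmmac_tso_usable(struct stmmac_priv *priv,
			      netdev_features_t features)
{
	return priv->plat->tso_en && priv->dma_cap.tsoen &&
	       (features & NETIF_F_TSO);
}
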
@@ -2534,7 +2795,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
}
/* To handle GMAC own interrupts */
- if (priv->plat->has_gmac) {
+ if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
if (unlikely(status)) {
@@ -2543,6 +2804,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+ priv->rx_tail_addr,
+ STMMAC_CHAN0);
}
}
@@ -2615,15 +2880,14 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
x = *(u64 *) ep;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des0, ep->basic.des1,
ep->basic.des2, ep->basic.des3);
ep++;
} else {
x = *(u64 *) p;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(p),
- (unsigned int)x, (unsigned int)(x >> 32),
- p->des2, p->des3);
+ p->des0, p->des1, p->des2, p->des3);
p++;
}
seq_printf(seq, "\n");
@@ -2706,10 +2970,15 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
- seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
- (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
- seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
- (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
+ (priv->dma_cap.rx_coe) ? "Y" : "N");
+ } else {
+ seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ }
seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
(priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
@@ -2818,27 +3087,35 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;
mac = dwmac1000_setup(priv->ioaddr,
priv->plat->multicast_filter_bins,
- priv->plat->unicast_filter_entries);
+ priv->plat->unicast_filter_entries,
+ &priv->synopsys_id);
+ } else if (priv->plat->has_gmac4) {
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac = dwmac4_setup(priv->ioaddr,
+ priv->plat->multicast_filter_bins,
+ priv->plat->unicast_filter_entries,
+ &priv->synopsys_id);
} else {
- mac = dwmac100_setup(priv->ioaddr);
+ mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
}
if (!mac)
return -ENOMEM;
priv->hw = mac;
- /* Get and dump the chip ID */
- priv->synopsys_id = stmmac_get_synopsys_id(priv);
-
/* To use the chained or ring mode */
- if (chain_mode) {
- priv->hw->mode = &chain_mode_ops;
- pr_info(" Chain mode enabled\n");
- priv->mode = STMMAC_CHAIN_MODE;
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->hw->mode = &dwmac4_ring_mode_ops;
} else {
- priv->hw->mode = &ring_mode_ops;
- pr_info(" Ring mode enabled\n");
- priv->mode = STMMAC_RING_MODE;
+ if (chain_mode) {
+ priv->hw->mode = &chain_mode_ops;
+ pr_info(" Chain mode enabled\n");
+ priv->mode = STMMAC_CHAIN_MODE;
+ } else {
+ priv->hw->mode = &ring_mode_ops;
+ pr_info(" Ring mode enabled\n");
+ priv->mode = STMMAC_RING_MODE;
+ }
}
/* Get the HW capability (new GMAC newer than 3.50a) */
@@ -2860,6 +3137,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
else
priv->plat->tx_coe = priv->dma_cap.tx_coe;
+ /* In case of GMAC4 rx_coe is from HW cap register. */
+ priv->plat->rx_coe = priv->dma_cap.rx_coe;
+
if (priv->dma_cap.rx_coe_type2)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
else if (priv->dma_cap.rx_coe_type1)
@@ -2868,13 +3148,17 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
} else
pr_info(" No HW DMA feature register supported");
- /* To use alternate (extended) or normal descriptor structures */
- stmmac_selec_desc_mode(priv);
+ /* To use alternate (extended), normal or GMAC4 descriptor structures */
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ priv->hw->desc = &dwmac4_desc_ops;
+ else
+ stmmac_selec_desc_mode(priv);
if (priv->plat->rx_coe) {
priv->hw->rx_csum = priv->plat->rx_coe;
- pr_info(" RX Checksum Offload Engine supported (type %d)\n",
- priv->plat->rx_coe);
+ pr_info(" RX Checksum Offload Engine supported\n");
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
}
if (priv->plat->tx_coe)
pr_info(" TX Checksum insertion supported\n");
@@ -2884,6 +3168,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
device_set_wakeup_capable(priv->device, 1);
}
+ if (priv->dma_cap.tsoen)
+ pr_info(" TSO supported\n");
+
return 0;
}
@@ -2987,6 +3274,12 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
+
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ ndev->hw_features |= NETIF_F_TSO;
+ priv->tso = true;
+ pr_info(" TSO feature enabled\n");
+ }
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
@@ -3062,12 +3355,13 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
/**
* stmmac_dvr_remove
- * @ndev: net device pointer
+ * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
 * changes the link status and releases the DMA descriptor rings.
*/
-int stmmac_dvr_remove(struct net_device *ndev)
+int stmmac_dvr_remove(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
pr_info("%s:\n\tremoving driver", __func__);
@@ -3093,13 +3387,14 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
/**
* stmmac_suspend - suspend callback
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: this is the function to suspend the device and it is called
* by the platform driver to stop the network queue, release the resources,
* program the PMT register (for WoL), clean and release driver resources.
*/
-int stmmac_suspend(struct net_device *ndev)
+int stmmac_suspend(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long flags;
@@ -3142,20 +3437,19 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
* stmmac_resume - resume callback
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: when resume this function is invoked to setup the DMA and CORE
* in a usable state.
*/
-int stmmac_resume(struct net_device *ndev)
+int stmmac_resume(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long flags;
if (!netif_running(ndev))
return 0;
- spin_lock_irqsave(&priv->lock, flags);
-
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
@@ -3163,7 +3457,9 @@ int stmmac_resume(struct net_device *ndev)
 * from another device (e.g. serial console).
*/
if (device_may_wakeup(priv->device)) {
+ spin_lock_irqsave(&priv->lock, flags);
priv->hw->mac->pmt(priv->hw, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
@@ -3177,10 +3473,17 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
+ spin_lock_irqsave(&priv->lock, flags);
+
priv->cur_rx = 0;
priv->dirty_rx = 0;
priv->dirty_tx = 0;
priv->cur_tx = 0;
+ /* Reset the private MSS value to force the MSS context to be
+ * reprogrammed at the next TSO xmit (only used for GMAC4).
+ */
+ priv->mss = 0;
+
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 8683a2169..ec2958518 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -37,6 +37,18 @@
#define MII_BUSY 0x00000001
#define MII_WRITE 0x00000002
+/* GMAC4 defines */
+#define MII_GMAC4_GOC_SHIFT 2
+#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
+#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
+
+#define MII_PHY_ADDR_GMAC4_SHIFT 21
+#define MII_PHY_ADDR_GMAC4_MASK GENMASK(25, 21)
+#define MII_PHY_REG_GMAC4_SHIFT 16
+#define MII_PHY_REG_GMAC4_MASK GENMASK(20, 16)
+#define MII_CSR_CLK_GMAC4_SHIFT 8
+#define MII_CSR_CLK_GMAC4_MASK GENMASK(11, 8)
+
static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
{
unsigned long curr;
@@ -124,6 +136,80 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
}
/**
+ * stmmac_mdio_read_gmac4
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 25-21
+ * @phyreg: MII addr reg bits 20-16
+ * Description: it reads data from the given PHY device through the
+ * GMAC4 MII register.
+ */
+static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ int data;
+ u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+ (MII_PHY_ADDR_GMAC4_MASK)) |
+ ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+ (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ;
+
+ value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
+ << MII_CSR_CLK_GMAC4_SHIFT);
+
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ writel(value, priv->ioaddr + mii_address);
+
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ /* Read the data from the MII data register */
+ data = (int)readl(priv->ioaddr + mii_data);
+
+ return data;
+}
+
+/**
+ * stmmac_mdio_write_gmac4
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 25-21
+ * @phyreg: MII addr reg bits 20-16
+ * @phydata: phy data
+ * Description: it writes the data into the MII register of the given
+ * PHY device through the GMAC4 interface.
+ */
+static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+
+ u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+ (MII_PHY_ADDR_GMAC4_MASK)) |
+ ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+ (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE;
+
+ value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
+ << MII_CSR_CLK_GMAC4_SHIFT);
+
+ /* Wait until any existing MII operation is complete */
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ /* Set the MII address register to write */
+ writel(phydata, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
+}
+
+/**
* stmmac_mdio_reset
* @bus: points to the mii_bus structure
* Description: reset the MII bus
@@ -180,9 +266,11 @@ int stmmac_mdio_reset(struct mii_bus *bus)
/* This is a workaround for problems with the STE101P PHY.
* It doesn't complete its reset until at least one clock cycle
- * on MDC, so perform a dummy mdio read.
+ * on MDC, so perform a dummy mdio read. To be updated for GMAC4
+ * if needed.
*/
- writel(0, priv->ioaddr + mii_address);
+ if (!priv->plat->has_gmac4)
+ writel(0, priv->ioaddr + mii_address);
#endif
return 0;
}
@@ -217,8 +305,14 @@ int stmmac_mdio_register(struct net_device *ndev)
#endif
new_bus->name = "stmmac";
- new_bus->read = &stmmac_mdio_read;
- new_bus->write = &stmmac_mdio_write;
+ if (priv->plat->has_gmac4) {
+ new_bus->read = &stmmac_mdio_read_gmac4;
+ new_bus->write = &stmmac_mdio_write_gmac4;
+ } else {
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ }
+
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
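
The GMAC4 MII address word packs the PHY address, register number, CSR clock range and the GOC command into a single register write; a worked sketch with hypothetical values (PHY address 1, register MII_BMSR):

static u32 gmac4_mii_read_word(struct stmmac_priv *priv)
{
	/* Hypothetical example: encode a read of MII_BMSR on PHY addr 1 */
	return ((1 << MII_PHY_ADDR_GMAC4_SHIFT) & MII_PHY_ADDR_GMAC4_MASK) |
	       ((MII_BMSR << MII_PHY_REG_GMAC4_SHIFT) &
		MII_PHY_REG_GMAC4_MASK) |
	       ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK) <<
		MII_CSR_CLK_GMAC4_SHIFT) |
	       MII_GMAC4_READ | MII_BUSY;
}
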
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index ae4388735..56c8a2342 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -231,30 +231,10 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
-
- stmmac_dvr_remove(ndev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int stmmac_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
-
- return stmmac_suspend(ndev);
-}
-
-static int stmmac_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
-
- return stmmac_resume(ndev);
+ stmmac_dvr_remove(&pdev->dev);
}
-#endif
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
#define STMMAC_VENDOR_ID 0x700
#define STMMAC_QUARK_ID 0x0937
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index cf37ea558..409db913b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -284,6 +284,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->pmt = 1;
}
+ if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
+ of_device_is_compatible(np, "snps,dwmac-4.10a")) {
+ plat->has_gmac4 = 1;
+ plat->pmt = 1;
+ plat->tso_en = of_property_read_bool(np, "snps,tso");
+ }
+
if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
of_device_is_compatible(np, "snps,dwmac-3.710")) {
plat->enh_desc = 1;
@@ -379,7 +386,7 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
- int ret = stmmac_dvr_remove(ndev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
@@ -403,7 +410,7 @@ static int stmmac_pltfr_suspend(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- ret = stmmac_suspend(ndev);
+ ret = stmmac_suspend(dev);
if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
@@ -426,7 +433,7 @@ static int stmmac_pltfr_resume(struct device *dev)
if (priv->plat->init)
priv->plat->init(pdev, priv->plat->bsp_priv);
- return stmmac_resume(ndev);
+ return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 9cc45649f..a2371aa14 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6431,7 +6431,7 @@ static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void niu_netif_stop(struct niu *np)
{
- np->dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(np->dev); /* prevent tx timeout */
niu_disable_napi(np);
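
For context on the recurring trans_start conversions in these hunks: in this
kernel generation the timestamp moved into the per-queue struct netdev_queue,
and netif_trans_update() is roughly the following (a sketch, not a verbatim
copy):

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* Avoid dirtying the cache line when the timestamp is unchanged */
	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}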
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 243722771..d6ad0fbd0 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -226,7 +226,7 @@ static void gem_put_cell(struct gem *gp)
static inline void gem_netif_stop(struct gem *gp)
{
- gp->dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(gp->dev); /* prevent tx timeout */
napi_disable(&gp->napi);
netif_tx_disable(gp->dev);
}
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index af11ed1e0..158213cd6 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -949,7 +949,7 @@ static void dwceqos_adjust_link(struct net_device *ndev)
if (status_change) {
if (phydev->link) {
- lp->ndev->trans_start = jiffies;
+ netif_trans_update(lp->ndev);
dwceqos_link_up(lp);
} else {
dwceqos_link_down(lp);
@@ -2203,7 +2203,7 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
netdev_sent_queue(ndev, skb->len);
spin_unlock_bh(&lp->tx_lock);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
return 0;
tx_error:
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index e34ca5fd3..f7540dd6b 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1610,7 +1610,6 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
* o NETDEV_TX_BUSY Cannot transmit packet, try later
* Usually a bug, means queue start/stop flow control is broken in
* the driver. Note: the driver must NOT put the skb in its DMA ring.
- * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
*/
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
struct net_device *ndev)
@@ -1630,12 +1629,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
ENTER;
local_irq_save(flags);
- if (!spin_trylock(&priv->tx_lock)) {
- local_irq_restore(flags);
- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
- BDX_DRV_NAME, ndev->name);
- return NETDEV_TX_LOCKED;
- }
+ spin_lock(&priv->tx_lock);
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
@@ -1707,7 +1701,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
#endif
#ifdef BDX_LLTX
- ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+ netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */
#endif
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e2fcdf1ee..53190894f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -380,7 +380,6 @@ struct cpsw_priv {
u32 coal_intvl;
u32 bus_freq_mhz;
int rx_packet_max;
- int host_port;
struct clk *clk;
u8 mac_addr[ETH_ALEN];
struct cpsw_slave *slaves;
@@ -530,21 +529,18 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
int slave_port = cpsw_get_slave_port(priv, \
slave->slave_num); \
cpsw_ale_add_mcast(priv->ale, addr, \
- 1 << slave_port | 1 << priv->host_port, \
+ 1 << slave_port | ALE_PORT_HOST, \
ALE_VLAN, slave->port_vlan, 0); \
} else { \
cpsw_ale_add_mcast(priv->ale, addr, \
- ALE_ALL_PORTS << priv->host_port, \
+ ALE_ALL_PORTS, \
0, 0, 0); \
} \
} while (0)
static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
- if (priv->host_port == 0)
- return slave_num + 1;
- else
- return slave_num;
+ return slave_num + 1;
}
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
@@ -601,8 +597,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
- priv->host_port, -1);
+ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -647,8 +642,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
- vid);
+ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid);
if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
@@ -1091,7 +1085,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
struct cpsw_priv *priv, struct cpsw_slave *slave,
u32 slave_port)
{
- u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+ u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
if (priv->version == CPSW_VERSION_1)
slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
@@ -1102,7 +1096,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
+ HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -1180,7 +1174,6 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
const int vlan = priv->data.default_vlan;
- const int port = priv->host_port;
u32 reg;
int i;
int unreg_mcast_mask;
@@ -1198,9 +1191,9 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
else
unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
- cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
- ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
- unreg_mcast_mask << port);
+ cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS,
+ ALE_ALL_PORTS, ALE_ALL_PORTS,
+ unreg_mcast_mask);
}
static void cpsw_init_host_port(struct cpsw_priv *priv)
@@ -1213,7 +1206,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
cpsw_ale_start(priv->ale);
/* switch to vlan unaware mode */
- cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+ cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&priv->regs->control);
control_reg |= CPSW_VLAN_AWARE;
@@ -1227,14 +1220,14 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
&priv->host_port_regs->cpdma_tx_pri_map);
__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
- cpsw_ale_control_set(priv->ale, priv->host_port,
+ cpsw_ale_control_set(priv->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
if (!priv->data.dual_emac) {
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
0, 0);
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+ ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
}
}
@@ -1281,8 +1274,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_add_default_vlan(priv);
else
cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
- ALE_ALL_PORTS << priv->host_port,
- ALE_ALL_PORTS << priv->host_port, 0, 0);
+ ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
if (!cpsw_common_res_usage_state(priv)) {
struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
@@ -1347,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (priv->coal_intvl != 0) {
struct ethtool_coalesce coal;
- coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ coal.rx_coalesce_usecs = priv->coal_intvl;
cpsw_set_coalesce(ndev, &coal);
}
@@ -1397,7 +1389,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
cpsw_err(priv, tx_err, "packet pad failed\n");
@@ -1628,9 +1620,9 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
flags = ALE_VLAN;
}
- cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
flags, vid);
- cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
+ cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM,
flags, vid);
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
@@ -1674,12 +1666,12 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
}
ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
- unreg_mcast_mask << priv->host_port);
+ unreg_mcast_mask);
if (ret != 0)
return ret;
ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, vid);
+ HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
goto clean_vid;
@@ -1691,7 +1683,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
clean_vlan_ucast:
cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, vid);
+ HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
cpsw_ale_del_vlan(priv->ale, vid, 0);
return ret;
@@ -1746,7 +1738,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
return ret;
ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, vid);
+ HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
return ret;
@@ -2157,7 +2149,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
priv_sl2->regs = priv->regs;
- priv_sl2->host_port = priv->host_port;
priv_sl2->host_port_regs = priv->host_port_regs;
priv_sl2->wr_regs = priv->wr_regs;
priv_sl2->hw_stats = priv->hw_stats;
@@ -2326,7 +2317,6 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_runtime_disable_ret;
}
priv->regs = ss_regs;
- priv->host_port = HOST_PORT_NUM;
/* Need to enable clocks with runtime PM api to access module
* registers
@@ -2515,8 +2505,6 @@ static int cpsw_probe(struct platform_device *pdev)
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
@@ -2544,8 +2532,6 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(ndev);
cpsw_ale_destroy(priv->ale);
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
pm_runtime_disable(&pdev->dev);
device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1d0942c53..32516661f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1272,7 +1272,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (ret)
goto drop;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
/* Check Tx pool count & stop subqueue if needed */
desc_count = knav_pool_count(netcp->tx_pool);
@@ -1788,7 +1788,7 @@ static void netcp_ndo_tx_timeout(struct net_device *ndev)
dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
netif_tx_wake_all_queues(ndev);
}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index a274cd49a..561703317 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1007,7 +1007,7 @@ static void tlan_tx_timeout(struct net_device *dev)
tlan_reset_lists(dev);
tlan_read_and_clear_stats(dev, TLAN_IGNORE);
tlan_reset_adapter(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0a15acc07..11213a38c 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -462,7 +462,7 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
struct skb_shared_hwtstamps shhwtstamps;
- struct timespec ts;
+ struct timespec64 ts;
shtx->tx_flags |= SKBTX_IN_PROGRESS;
gxio_mpipe_get_timestamp(&md->context, &ts);
@@ -886,9 +886,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
- struct timespec ts;
+ struct timespec64 ts;
- getnstimeofday(&ts);
+ ktime_get_ts64(&ts);
gxio_mpipe_set_timestamp(&md->context, &ts);
mutex_init(&md->ptp_lock);
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 298e059d0..922a443e3 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -1883,7 +1883,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
/* Save the timestamp. */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
#ifdef TILE_NET_PARANOIA
@@ -2026,7 +2026,7 @@ static void tile_net_tx_timeout(struct net_device *dev)
{
PDEBUG("tile_net_tx_timeout()\n");
PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
- jiffies - dev->trans_start);
+ jiffies - dev_trans_start(dev));
/* XXX: ISSUE: This doesn't seem useful for us. */
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 743b18266..446ea580a 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1616,13 +1616,13 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
target->valid = 1;
target->eurus_index = i;
kfree(target->hwinfo);
- target->hwinfo = kzalloc(be16_to_cpu(scan_info->size),
+ target->hwinfo = kmemdup(scan_info,
+ be16_to_cpu(scan_info->size),
GFP_KERNEL);
if (!target->hwinfo)
continue;
/* copy hw scan info */
- memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
target->essid_len = strnlen(scan_info->essid,
sizeof(scan_info->essid));
target->rate_len = 0;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index feb20cfbd..497e725ee 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -705,7 +705,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
wmb();
descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
- card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
+ netif_trans_update(card->netdev); /* set netdev watchdog timer */
return 0;
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 520cf50a3..01a77145a 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1314,7 +1314,8 @@ static int tsi108_open(struct net_device *dev)
data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
GFP_KERNEL);
if (!data->txring) {
- pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
+ pci_free_consistent(NULL, rxring_size, data->rxring,
+ data->rxdma);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 2b7550c43..9d14731cd 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1758,7 +1758,7 @@ static void rhine_reset_task(struct work_struct *work)
spin_unlock_bh(&rp->lock);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
index f98b91d21..1981e88c1 100644
--- a/drivers/net/ethernet/wiznet/Kconfig
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -69,4 +69,18 @@ config WIZNET_BUS_ANY
Performance may decrease compared to explicitly selected bus mode.
endchoice
+config WIZNET_W5100_SPI
+ tristate "WIZnet W5100/W5200/W5500 Ethernet support for SPI mode"
+ depends on WIZNET_BUS_ANY && WIZNET_W5100
+ depends on SPI
+ ---help---
+ In SPI mode, the host system accesses registers using the SPI
+ protocol (mode 0) on the SPI bus.
+
+ Performance decreases compared to the other bus interface modes.
+ In W5100 SPI mode, burst READ/WRITE processing is not provided.
+
+ To compile this driver as a module, choose M here: the module
+ will be called w5100-spi.
+
endif # NET_VENDOR_WIZNET
diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile
index c61453522..1e05e1a84 100644
--- a/drivers/net/ethernet/wiznet/Makefile
+++ b/drivers/net/ethernet/wiznet/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_WIZNET_W5100) += w5100.o
+obj-$(CONFIG_WIZNET_W5100_SPI) += w5100-spi.o
obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
new file mode 100644
index 000000000..93a2d3c07
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -0,0 +1,466 @@
+/*
+ * Ethernet driver for the WIZnet W5100/W5200/W5500 chip.
+ *
+ * Copyright (C) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ * Datasheet:
+ * http://www.wiznet.co.kr/wp-content/uploads/wiznethome/Chip/W5100/Document/W5100_Datasheet_v1.2.6.pdf
+ * http://wiznethome.cafe24.com/wp-content/uploads/wiznethome/Chip/W5200/Documents/W5200_DS_V140E.pdf
+ * http://wizwiki.net/wiki/lib/exe/fetch.php?media=products:w5500:w5500_ds_v106e_141230.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/spi/spi.h>
+
+#include "w5100.h"
+
+#define W5100_SPI_WRITE_OPCODE 0xf0
+#define W5100_SPI_READ_OPCODE 0x0f
+
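+/* Every W5100 SPI access is a fixed-length frame: a one-byte opcode, a
+ * 16-bit big-endian address, then a single data byte (sent on writes,
+ * clocked in on reads).
+ */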
+static int w5100_spi_read(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[3] = { W5100_SPI_READ_OPCODE, addr >> 8, addr & 0xff };
+ u8 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+ return ret ? ret : data;
+}
+
+static int w5100_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = { W5100_SPI_WRITE_OPCODE, addr >> 8, addr & 0xff, data};
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5100_spi_read16(struct net_device *ndev, u32 addr)
+{
+ u16 data;
+ int ret;
+
+ ret = w5100_spi_read(ndev, addr);
+ if (ret < 0)
+ return ret;
+ data = ret << 8;
+ ret = w5100_spi_read(ndev, addr + 1);
+
+ return ret < 0 ? ret : data | ret;
+}
+
+static int w5100_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+ int ret;
+
+ ret = w5100_spi_write(ndev, addr, data >> 8);
+ if (ret)
+ return ret;
+
+ return w5100_spi_write(ndev, addr + 1, data & 0xff);
+}
+
+static int w5100_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
+{
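+	/* W5100 SPI mode has no burst access (see the Kconfig help text),
+	 * so each byte is transferred as its own SPI transaction.
+	 */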
+ int i;
+
+ for (i = 0; i < len; i++) {
+ int ret = w5100_spi_read(ndev, addr + i);
+
+ if (ret < 0)
+ return ret;
+ buf[i] = ret;
+ }
+
+ return 0;
+}
+
+static int w5100_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ int ret = w5100_spi_write(ndev, addr + i, buf[i]);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct w5100_ops w5100_spi_ops = {
+ .may_sleep = true,
+ .chip_id = W5100,
+ .read = w5100_spi_read,
+ .write = w5100_spi_write,
+ .read16 = w5100_spi_read16,
+ .write16 = w5100_spi_write16,
+ .readbulk = w5100_spi_readbulk,
+ .writebulk = w5100_spi_writebulk,
+};
+
+#define W5200_SPI_WRITE_OPCODE 0x80
+
+struct w5200_spi_priv {
+ /* Serialize access to cmd_buf */
+ struct mutex cmd_lock;
+
+ /* DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u8 cmd_buf[4] ____cacheline_aligned;
+};
+
+static struct w5200_spi_priv *w5200_spi_priv(struct net_device *ndev)
+{
+ return w5100_ops_priv(ndev);
+}
+
+static int w5200_spi_init(struct net_device *ndev)
+{
+ struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+
+ mutex_init(&spi_priv->cmd_lock);
+
+ return 0;
+}
+
+static int w5200_spi_read(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 1 };
+ u8 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+ return ret ? ret : data;
+}
+
+static int w5200_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[5] = { addr >> 8, addr & 0xff, W5200_SPI_WRITE_OPCODE, 1, data };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5200_spi_read16(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 2 };
+ __be16 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data));
+
+ return ret ? ret : be16_to_cpu(data);
+}
+
+static int w5200_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[6] = {
+ addr >> 8, addr & 0xff,
+ W5200_SPI_WRITE_OPCODE, 2,
+ data >> 8, data & 0xff
+ };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5200_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .rx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = len >> 8;
+ spi_priv->cmd_buf[3] = len;
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static int w5200_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .tx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = W5200_SPI_WRITE_OPCODE | (len >> 8);
+ spi_priv->cmd_buf[3] = len;
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static const struct w5100_ops w5200_ops = {
+ .may_sleep = true,
+ .chip_id = W5200,
+ .read = w5200_spi_read,
+ .write = w5200_spi_write,
+ .read16 = w5200_spi_read16,
+ .write16 = w5200_spi_write16,
+ .readbulk = w5200_spi_readbulk,
+ .writebulk = w5200_spi_writebulk,
+ .init = w5200_spi_init,
+};
+
+#define W5500_SPI_BLOCK_SELECT(addr) (((addr) >> 16) & 0x1f)
+#define W5500_SPI_READ_CONTROL(addr) (W5500_SPI_BLOCK_SELECT(addr) << 3)
+#define W5500_SPI_WRITE_CONTROL(addr) \
+ ((W5500_SPI_BLOCK_SELECT(addr) << 3) | BIT(2))
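+
+/* For example, an encoded address of 0x10000 (block 1, offset 0x0000)
+ * yields a control byte of 0x08 for reads and 0x0c for writes; BIT(2)
+ * is the read/write select bit.
+ */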
+
+struct w5500_spi_priv {
+ /* Serialize access to cmd_buf */
+ struct mutex cmd_lock;
+
+ /* DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u8 cmd_buf[3] ____cacheline_aligned;
+};
+
+static struct w5500_spi_priv *w5500_spi_priv(struct net_device *ndev)
+{
+ return w5100_ops_priv(ndev);
+}
+
+static int w5500_spi_init(struct net_device *ndev)
+{
+ struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+
+ mutex_init(&spi_priv->cmd_lock);
+
+ return 0;
+}
+
+static int w5500_spi_read(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[3] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_READ_CONTROL(addr)
+ };
+ u8 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+ return ret ? ret : data;
+}
+
+static int w5500_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_WRITE_CONTROL(addr),
+ data
+ };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5500_spi_read16(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[3] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_READ_CONTROL(addr)
+ };
+ __be16 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data));
+
+ return ret ? ret : be16_to_cpu(data);
+}
+
+static int w5500_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[5] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_WRITE_CONTROL(addr),
+ data >> 8,
+ data
+ };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5500_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .rx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = W5500_SPI_READ_CONTROL(addr);
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static int w5500_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .tx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = W5500_SPI_WRITE_CONTROL(addr);
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static const struct w5100_ops w5500_ops = {
+ .may_sleep = true,
+ .chip_id = W5500,
+ .read = w5500_spi_read,
+ .write = w5500_spi_write,
+ .read16 = w5500_spi_read16,
+ .write16 = w5500_spi_write16,
+ .readbulk = w5500_spi_readbulk,
+ .writebulk = w5500_spi_writebulk,
+ .init = w5500_spi_init,
+};
+
+static int w5100_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct w5100_ops *ops;
+ int priv_size;
+ const void *mac = of_get_mac_address(spi->dev.of_node);
+
+ switch (id->driver_data) {
+ case W5100:
+ ops = &w5100_spi_ops;
+ priv_size = 0;
+ break;
+ case W5200:
+ ops = &w5200_ops;
+ priv_size = sizeof(struct w5200_spi_priv);
+ break;
+ case W5500:
+ ops = &w5500_ops;
+ priv_size = sizeof(struct w5500_spi_priv);
+ break;
+ default:
+ return -EINVAL;
+ }
+
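+	/* The SPI variants have no link GPIO; the -EINVAL sentinel below
+	 * marks it as absent so the core skips link-state detection.
+	 */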
+ return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL);
+}
+
+static int w5100_spi_remove(struct spi_device *spi)
+{
+ return w5100_remove(&spi->dev);
+}
+
+static const struct spi_device_id w5100_spi_ids[] = {
+ { "w5100", W5100 },
+ { "w5200", W5200 },
+ { "w5500", W5500 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, w5100_spi_ids);
+
+static struct spi_driver w5100_spi_driver = {
+ .driver = {
+ .name = "w5100",
+ .pm = &w5100_pm_ops,
+ },
+ .probe = w5100_spi_probe,
+ .remove = w5100_spi_remove,
+ .id_table = w5100_spi_ids,
+};
+module_spi_driver(w5100_spi_driver);
+
+MODULE_DESCRIPTION("WIZnet W5100/W5200/W5500 Ethernet driver for SPI mode");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 8b282d0b1..4f6255cf6 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -27,6 +27,8 @@
#include <linux/irq.h>
#include <linux/gpio.h>
+#include "w5100.h"
+
#define DRV_NAME "w5100"
#define DRV_VERSION "2012-04-04"
@@ -36,7 +38,7 @@ MODULE_ALIAS("platform:"DRV_NAME);
MODULE_LICENSE("GPL");
/*
- * Registers
+ * W5100/W5200/W5500 common registers
*/
#define W5100_COMMON_REGS 0x0000
#define W5100_MR 0x0000 /* Mode Register */
@@ -46,55 +48,115 @@ MODULE_LICENSE("GPL");
#define MR_IND 0x01 /* Indirect mode */
#define W5100_SHAR 0x0009 /* Source MAC address */
#define W5100_IR 0x0015 /* Interrupt Register */
-#define W5100_IMR 0x0016 /* Interrupt Mask Register */
-#define IR_S0 0x01 /* S0 interrupt */
-#define W5100_RTR 0x0017 /* Retry Time-value Register */
-#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
-#define W5100_RMSR 0x001a /* Receive Memory Size */
-#define W5100_TMSR 0x001b /* Transmit Memory Size */
#define W5100_COMMON_REGS_LEN 0x0040
-#define W5100_S0_REGS 0x0400
-#define W5100_S0_MR 0x0400 /* S0 Mode Register */
-#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */
-#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */
-#define W5100_S0_CR 0x0401 /* S0 Command Register */
+#define W5100_Sn_MR 0x0000 /* Sn Mode Register */
+#define W5100_Sn_CR 0x0001 /* Sn Command Register */
+#define W5100_Sn_IR 0x0002 /* Sn Interrupt Register */
+#define W5100_Sn_SR 0x0003 /* Sn Status Register */
+#define W5100_Sn_TX_FSR 0x0020 /* Sn Transmit free memory size */
+#define W5100_Sn_TX_RD 0x0022 /* Sn Transmit memory read pointer */
+#define W5100_Sn_TX_WR 0x0024 /* Sn Transmit memory write pointer */
+#define W5100_Sn_RX_RSR 0x0026 /* Sn Received data size */
+#define W5100_Sn_RX_RD 0x0028 /* Sn Receive memory read pointer */
+
+#define S0_REGS(priv) ((priv)->s0_regs)
+
+#define W5100_S0_MR(priv) (S0_REGS(priv) + W5100_Sn_MR)
+#define S0_MR_MACRAW 0x04 /* MAC RAW mode */
+#define S0_MR_MF 0x40 /* MAC Filter for W5100 and W5200 */
+#define W5500_S0_MR_MF 0x80 /* MAC Filter for W5500 */
+#define W5100_S0_CR(priv) (S0_REGS(priv) + W5100_Sn_CR)
#define S0_CR_OPEN 0x01 /* OPEN command */
#define S0_CR_CLOSE 0x10 /* CLOSE command */
#define S0_CR_SEND 0x20 /* SEND command */
#define S0_CR_RECV 0x40 /* RECV command */
-#define W5100_S0_IR 0x0402 /* S0 Interrupt Register */
+#define W5100_S0_IR(priv) (S0_REGS(priv) + W5100_Sn_IR)
#define S0_IR_SENDOK 0x10 /* complete sending */
#define S0_IR_RECV 0x04 /* receiving data */
-#define W5100_S0_SR 0x0403 /* S0 Status Register */
+#define W5100_S0_SR(priv) (S0_REGS(priv) + W5100_Sn_SR)
#define S0_SR_MACRAW 0x42 /* mac raw mode */
-#define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */
-#define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */
-#define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */
-#define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */
-#define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */
+#define W5100_S0_TX_FSR(priv) (S0_REGS(priv) + W5100_Sn_TX_FSR)
+#define W5100_S0_TX_RD(priv) (S0_REGS(priv) + W5100_Sn_TX_RD)
+#define W5100_S0_TX_WR(priv) (S0_REGS(priv) + W5100_Sn_TX_WR)
+#define W5100_S0_RX_RSR(priv) (S0_REGS(priv) + W5100_Sn_RX_RSR)
+#define W5100_S0_RX_RD(priv) (S0_REGS(priv) + W5100_Sn_RX_RD)
+
#define W5100_S0_REGS_LEN 0x0040
+/*
+ * W5100 and W5200 common registers
+ */
+#define W5100_IMR 0x0016 /* Interrupt Mask Register */
+#define IR_S0 0x01 /* S0 interrupt */
+#define W5100_RTR 0x0017 /* Retry Time-value Register */
+#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
+
+/*
+ * W5100-specific registers and memory
+ */
+#define W5100_RMSR 0x001a /* Receive Memory Size */
+#define W5100_TMSR 0x001b /* Transmit Memory Size */
+
+#define W5100_S0_REGS 0x0400
+
#define W5100_TX_MEM_START 0x4000
-#define W5100_TX_MEM_END 0x5fff
-#define W5100_TX_MEM_MASK 0x1fff
+#define W5100_TX_MEM_SIZE 0x2000
#define W5100_RX_MEM_START 0x6000
-#define W5100_RX_MEM_END 0x7fff
-#define W5100_RX_MEM_MASK 0x1fff
+#define W5100_RX_MEM_SIZE 0x2000
+
+/*
+ * W5200-specific registers and memory
+ */
+#define W5200_S0_REGS 0x4000
+
+#define W5200_Sn_RXMEM_SIZE(n) (0x401e + (n) * 0x0100) /* Sn RX Memory Size */
+#define W5200_Sn_TXMEM_SIZE(n) (0x401f + (n) * 0x0100) /* Sn TX Memory Size */
+
+#define W5200_TX_MEM_START 0x8000
+#define W5200_TX_MEM_SIZE 0x4000
+#define W5200_RX_MEM_START 0xc000
+#define W5200_RX_MEM_SIZE 0x4000
+
+/*
+ * W5500-specific registers and memory
+ *
+ * W5500 registers and memory are organized into multiple blocks, each
+ * selected by a 16-bit offset address plus 5 block select bits. We
+ * therefore encode both into a single 32-bit address (the lower 16 bits
+ * hold the offset address and the upper 16 bits the block select bits).
+ */
+#define W5500_SIMR 0x0018 /* Socket Interrupt Mask Register */
+#define W5500_RTR 0x0019 /* Retry Time-value Register */
+
+#define W5500_S0_REGS 0x10000
+
+#define W5500_Sn_RXMEM_SIZE(n) \
+ (0x1001e + (n) * 0x40000) /* Sn RX Memory Size */
+#define W5500_Sn_TXMEM_SIZE(n) \
+ (0x1001f + (n) * 0x40000) /* Sn TX Memory Size */
+
+#define W5500_TX_MEM_START 0x20000
+#define W5500_TX_MEM_SIZE 0x04000
+#define W5500_RX_MEM_START 0x30000
+#define W5500_RX_MEM_SIZE 0x04000
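+
+/* Under this encoding, W5500_S0_REGS (0x10000) is block 1 at offset 0,
+ * and the socket 0 TX and RX buffers above live in blocks 2 and 3.
+ */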
/*
* Device driver private data structure
*/
+
struct w5100_priv {
- void __iomem *base;
- spinlock_t reg_lock;
- bool indirect;
- u8 (*read)(struct w5100_priv *priv, u16 addr);
- void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
- u16 (*read16)(struct w5100_priv *priv, u16 addr);
- void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
- void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
- void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+ const struct w5100_ops *ops;
+
+ /* Socket 0 register offset address */
+ u32 s0_regs;
+ /* Socket 0 TX buffer offset address and size */
+ u32 s0_tx_buf;
+ u16 s0_tx_buf_size;
+ /* Socket 0 RX buffer offset address and size */
+ u32 s0_rx_buf;
+ u16 s0_rx_buf_size;
+
int irq;
int link_irq;
int link_gpio;
@@ -103,6 +165,13 @@ struct w5100_priv {
struct net_device *ndev;
bool promisc;
u32 msg_enable;
+
+ struct workqueue_struct *xfer_wq;
+ struct work_struct rx_work;
+ struct sk_buff *tx_skb;
+ struct work_struct tx_work;
+ struct work_struct setrx_work;
+ struct work_struct restart_work;
};
/************************************************************************
@@ -111,63 +180,122 @@ struct w5100_priv {
*
***********************************************************************/
+struct w5100_mmio_priv {
+ void __iomem *base;
+ /* Serialize access in indirect address mode */
+ spinlock_t reg_lock;
+};
+
+static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev)
+{
+ return w5100_ops_priv(dev);
+}
+
+static inline void __iomem *w5100_mmio(struct net_device *ndev)
+{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
+
+ return mmio_priv->base;
+}
+
/*
 * In direct address mode the host system can access W5100 registers
 * directly once they are mapped into memory-mapped I/O space.
 *
 * 0x8000 bytes of address space are required.
*/
-static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
+static inline int w5100_read_direct(struct net_device *ndev, u32 addr)
{
- return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+ return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
}
-static inline void w5100_write_direct(struct w5100_priv *priv,
- u16 addr, u8 data)
+static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
+ u8 data)
{
- iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+ iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
+
+ return 0;
}
-static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
+static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
+{
+ __w5100_write_direct(ndev, addr, data);
+ mmiowb();
+
+ return 0;
+}
+
+static int w5100_read16_direct(struct net_device *ndev, u32 addr)
{
u16 data;
- data = w5100_read_direct(priv, addr) << 8;
- data |= w5100_read_direct(priv, addr + 1);
+ data = w5100_read_direct(ndev, addr) << 8;
+ data |= w5100_read_direct(ndev, addr + 1);
return data;
}
-static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
+static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
{
- w5100_write_direct(priv, addr, data >> 8);
- w5100_write_direct(priv, addr + 1, data);
+ __w5100_write_direct(ndev, addr, data >> 8);
+ __w5100_write_direct(ndev, addr + 1, data);
+ mmiowb();
+
+ return 0;
}
-static void w5100_readbuf_direct(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
{
- u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
int i;
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_RX_MEM_END))
- addr = W5100_RX_MEM_START;
- *buf++ = w5100_read_direct(priv, addr);
- }
+ for (i = 0; i < len; i++, addr++)
+ *buf++ = w5100_read_direct(ndev, addr);
+
+ return 0;
}
-static void w5100_writebuf_direct(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
+ const u8 *buf, int len)
{
- u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
int i;
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_TX_MEM_END))
- addr = W5100_TX_MEM_START;
- w5100_write_direct(priv, addr, *buf++);
- }
+ for (i = 0; i < len; i++, addr++)
+ __w5100_write_direct(ndev, addr, *buf++);
+
+ mmiowb();
+
+ return 0;
}
+static int w5100_mmio_init(struct net_device *ndev)
+{
+ struct platform_device *pdev = to_platform_device(ndev->dev.parent);
+ struct w5100_priv *priv = netdev_priv(ndev);
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
+ struct resource *mem;
+
+ spin_lock_init(&mmio_priv->reg_lock);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(mmio_priv->base))
+ return PTR_ERR(mmio_priv->base);
+
+ netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
+
+ return 0;
+}
+
+static const struct w5100_ops w5100_mmio_direct_ops = {
+ .chip_id = W5100,
+ .read = w5100_read_direct,
+ .write = w5100_write_direct,
+ .read16 = w5100_read16_direct,
+ .write16 = w5100_write16_direct,
+ .readbulk = w5100_readbulk_direct,
+ .writebulk = w5100_writebulk_direct,
+ .init = w5100_mmio_init,
+};
+
/*
 * In indirect address mode the host system accesses registers indirectly,
 * using the Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
@@ -179,139 +307,290 @@ static void w5100_writebuf_direct(struct w5100_priv *priv,
#define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */
#define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */
-static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
+static int w5100_read_indirect(struct net_device *ndev, u32 addr)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
u8 data;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- data = w5100_read_direct(priv, W5100_IDM_DR);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ data = w5100_read_direct(ndev, W5100_IDM_DR);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return data;
}
-static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
+static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- w5100_write_direct(priv, W5100_IDM_DR, data);
- mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ w5100_write_direct(ndev, W5100_IDM_DR, data);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
-static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
+static int w5100_read16_indirect(struct net_device *ndev, u32 addr)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
u16 data;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- data = w5100_read_direct(priv, W5100_IDM_DR) << 8;
- data |= w5100_read_direct(priv, W5100_IDM_DR);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ data = w5100_read_direct(ndev, W5100_IDM_DR) << 8;
+ data |= w5100_read_direct(ndev, W5100_IDM_DR);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return data;
}
-static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
+static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
- w5100_write_direct(priv, W5100_IDM_DR, data);
- mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ __w5100_write_direct(ndev, W5100_IDM_DR, data >> 8);
+ w5100_write_direct(ndev, W5100_IDM_DR, data);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
-static void w5100_readbuf_indirect(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
{
- u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
int i;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+
+ for (i = 0; i < len; i++)
+ *buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_RX_MEM_END)) {
- addr = W5100_RX_MEM_START;
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- }
- *buf++ = w5100_read_direct(priv, W5100_IDM_DR);
- }
mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
-static void w5100_writebuf_indirect(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
+ const u8 *buf, int len)
{
- u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
int i;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+
+ for (i = 0; i < len; i++)
+ __w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_TX_MEM_END)) {
- addr = W5100_TX_MEM_START;
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- }
- w5100_write_direct(priv, W5100_IDM_DR, *buf++);
- }
mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
+static int w5100_reset_indirect(struct net_device *ndev)
+{
+ w5100_write_direct(ndev, W5100_MR, MR_RST);
+ mdelay(5);
+ w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND);
+
+ return 0;
+}
+
+static const struct w5100_ops w5100_mmio_indirect_ops = {
+ .chip_id = W5100,
+ .read = w5100_read_indirect,
+ .write = w5100_write_indirect,
+ .read16 = w5100_read16_indirect,
+ .write16 = w5100_write16_indirect,
+ .readbulk = w5100_readbulk_indirect,
+ .writebulk = w5100_writebulk_indirect,
+ .init = w5100_mmio_init,
+ .reset = w5100_reset_indirect,
+};
+
#if defined(CONFIG_WIZNET_BUS_DIRECT)
-#define w5100_read w5100_read_direct
-#define w5100_write w5100_write_direct
-#define w5100_read16 w5100_read16_direct
-#define w5100_write16 w5100_write16_direct
-#define w5100_readbuf w5100_readbuf_direct
-#define w5100_writebuf w5100_writebuf_direct
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read_direct(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+ return w5100_write_direct(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read16_direct(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+ return w5100_write16_direct(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+ return w5100_readbulk_direct(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+ int len)
+{
+ return w5100_writebulk_direct(priv->ndev, addr, buf, len);
+}
#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
-#define w5100_read w5100_read_indirect
-#define w5100_write w5100_write_indirect
-#define w5100_read16 w5100_read16_indirect
-#define w5100_write16 w5100_write16_indirect
-#define w5100_readbuf w5100_readbuf_indirect
-#define w5100_writebuf w5100_writebuf_indirect
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read_indirect(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+ return w5100_write_indirect(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read16_indirect(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+ return w5100_write16_indirect(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+ return w5100_readbulk_indirect(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+ int len)
+{
+ return w5100_writebulk_indirect(priv->ndev, addr, buf, len);
+}
#else /* CONFIG_WIZNET_BUS_ANY */
-#define w5100_read priv->read
-#define w5100_write priv->write
-#define w5100_read16 priv->read16
-#define w5100_write16 priv->write16
-#define w5100_readbuf priv->readbuf
-#define w5100_writebuf priv->writebuf
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+ return priv->ops->read(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+ return priv->ops->write(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+ return priv->ops->read16(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+ return priv->ops->write16(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+ return priv->ops->readbulk(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+ int len)
+{
+ return priv->ops->writebulk(priv->ndev, addr, buf, len);
+}
+
#endif
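+
+/* The socket RX/TX buffers are circular: a transfer that runs past the
+ * end of a buffer wraps back to its start, so w5100_readbuf() and
+ * w5100_writebuf() split it into at most two bulk accesses.
+ */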
+static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len)
+{
+ u32 addr;
+ int remain = 0;
+ int ret;
+ const u32 mem_start = priv->s0_rx_buf;
+ const u16 mem_size = priv->s0_rx_buf_size;
+
+ offset %= mem_size;
+ addr = mem_start + offset;
+
+ if (offset + len > mem_size) {
+ remain = (offset + len) % mem_size;
+ len = mem_size - offset;
+ }
+
+ ret = w5100_readbulk(priv, addr, buf, len);
+ if (ret || !remain)
+ return ret;
+
+ return w5100_readbulk(priv, mem_start, buf + len, remain);
+}
+
+static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf,
+ int len)
+{
+ u32 addr;
+ int ret;
+ int remain = 0;
+ const u32 mem_start = priv->s0_tx_buf;
+ const u16 mem_size = priv->s0_tx_buf_size;
+
+ offset %= mem_size;
+ addr = mem_start + offset;
+
+ if (offset + len > mem_size) {
+ remain = (offset + len) % mem_size;
+ len = mem_size - offset;
+ }
+
+ ret = w5100_writebulk(priv, addr, buf, len);
+ if (ret || !remain)
+ return ret;
+
+ return w5100_writebulk(priv, mem_start, buf + len, remain);
+}
+
+static int w5100_reset(struct w5100_priv *priv)
+{
+ if (priv->ops->reset)
+ return priv->ops->reset(priv->ndev);
+
+ w5100_write(priv, W5100_MR, MR_RST);
+ mdelay(5);
+ w5100_write(priv, W5100_MR, MR_PB);
+
+ return 0;
+}
+
static int w5100_command(struct w5100_priv *priv, u16 cmd)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ unsigned long timeout;
- w5100_write(priv, W5100_S0_CR, cmd);
- mmiowb();
+ w5100_write(priv, W5100_S0_CR(priv), cmd);
+
+ timeout = jiffies + msecs_to_jiffies(100);
- while (w5100_read(priv, W5100_S0_CR) != 0) {
+ while (w5100_read(priv, W5100_S0_CR(priv)) != 0) {
if (time_after(jiffies, timeout))
return -EIO;
cpu_relax();
@@ -323,47 +602,124 @@ static int w5100_command(struct w5100_priv *priv, u16 cmd)
static void w5100_write_macaddr(struct w5100_priv *priv)
{
struct net_device *ndev = priv->ndev;
- int i;
- for (i = 0; i < ETH_ALEN; i++)
- w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
- mmiowb();
+ w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN);
}
-static void w5100_hw_reset(struct w5100_priv *priv)
+static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask)
{
- w5100_write_direct(priv, W5100_MR, MR_RST);
- mmiowb();
- mdelay(5);
- w5100_write_direct(priv, W5100_MR, priv->indirect ?
- MR_PB | MR_AI | MR_IND :
- MR_PB);
- mmiowb();
- w5100_write(priv, W5100_IMR, 0);
- w5100_write_macaddr(priv);
+ u32 imr;
+
+ if (priv->ops->chip_id == W5500)
+ imr = W5500_SIMR;
+ else
+ imr = W5100_IMR;
+
+ w5100_write(priv, imr, mask);
+}
+static void w5100_enable_intr(struct w5100_priv *priv)
+{
+ w5100_socket_intr_mask(priv, IR_S0);
+}
+
+static void w5100_disable_intr(struct w5100_priv *priv)
+{
+ w5100_socket_intr_mask(priv, 0);
+}
+
+static void w5100_memory_configure(struct w5100_priv *priv)
+{
/* Configure 16K of internal memory
* as 8K RX buffer and 8K TX buffer
*/
w5100_write(priv, W5100_RMSR, 0x03);
w5100_write(priv, W5100_TMSR, 0x03);
- mmiowb();
+}
+
+static void w5200_memory_configure(struct w5100_priv *priv)
+{
+ int i;
+
+ /* Configure internal RX memory as 16K RX buffer and
+ * internal TX memory as 16K TX buffer
+ */
+ w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10);
+ w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10);
+
+ for (i = 1; i < 8; i++) {
+ w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0);
+ w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0);
+ }
+}
+
+static void w5500_memory_configure(struct w5100_priv *priv)
+{
+ int i;
+
+ /* Configure internal RX memory as 16K RX buffer and
+ * internal TX memory as 16K TX buffer
+ */
+ w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10);
+ w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10);
+
+ for (i = 1; i < 8; i++) {
+ w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0);
+ w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0);
+ }
+}
+
+static int w5100_hw_reset(struct w5100_priv *priv)
+{
+ u32 rtr;
+
+ w5100_reset(priv);
+
+ w5100_disable_intr(priv);
+ w5100_write_macaddr(priv);
+
+ switch (priv->ops->chip_id) {
+ case W5100:
+ w5100_memory_configure(priv);
+ rtr = W5100_RTR;
+ break;
+ case W5200:
+ w5200_memory_configure(priv);
+ rtr = W5100_RTR;
+ break;
+ case W5500:
+ w5500_memory_configure(priv);
+ rtr = W5500_RTR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
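+	/* Sanity-check chip presence: the retry-time register must still
+	 * hold its documented reset default after the reset above.
+	 */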
+ if (w5100_read16(priv, rtr) != RTR_DEFAULT)
+ return -ENODEV;
+
+ return 0;
}
static void w5100_hw_start(struct w5100_priv *priv)
{
- w5100_write(priv, W5100_S0_MR, priv->promisc ?
- S0_MR_MACRAW : S0_MR_MACRAW_MF);
- mmiowb();
+ u8 mode = S0_MR_MACRAW;
+
+ if (!priv->promisc) {
+ if (priv->ops->chip_id == W5500)
+ mode |= W5500_S0_MR_MF;
+ else
+ mode |= S0_MR_MF;
+ }
+
+ w5100_write(priv, W5100_S0_MR(priv), mode);
w5100_command(priv, S0_CR_OPEN);
- w5100_write(priv, W5100_IMR, IR_S0);
- mmiowb();
+ w5100_enable_intr(priv);
}
static void w5100_hw_close(struct w5100_priv *priv)
{
- w5100_write(priv, W5100_IMR, 0);
- mmiowb();
+ w5100_disable_intr(priv);
w5100_command(priv, S0_CR_CLOSE);
}
@@ -412,20 +768,17 @@ static int w5100_get_regs_len(struct net_device *ndev)
}
static void w5100_get_regs(struct net_device *ndev,
- struct ethtool_regs *regs, void *_buf)
+ struct ethtool_regs *regs, void *buf)
{
struct w5100_priv *priv = netdev_priv(ndev);
- u8 *buf = _buf;
- u16 i;
regs->version = 1;
- for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
- *buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
- for (i = 0; i < W5100_S0_REGS_LEN; i++)
- *buf++ = w5100_read(priv, W5100_S0_REGS + i);
+ w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN);
+ buf += W5100_COMMON_REGS_LEN;
+ w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN);
}
-static void w5100_tx_timeout(struct net_device *ndev)
+static void w5100_restart(struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
@@ -433,74 +786,138 @@ static void w5100_tx_timeout(struct net_device *ndev)
w5100_hw_reset(priv);
w5100_hw_start(priv);
ndev->stats.tx_errors++;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
netif_wake_queue(ndev);
}
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static void w5100_restart_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ restart_work);
+
+ w5100_restart(priv->ndev);
+}
+
+static void w5100_tx_timeout(struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
- u16 offset;
- netif_stop_queue(ndev);
+ if (priv->ops->may_sleep)
+ schedule_work(&priv->restart_work);
+ else
+ w5100_restart(ndev);
+}
- offset = w5100_read16(priv, W5100_S0_TX_WR);
+static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
+{
+ struct w5100_priv *priv = netdev_priv(ndev);
+ u16 offset;
+
+ offset = w5100_read16(priv, W5100_S0_TX_WR(priv));
w5100_writebuf(priv, offset, skb->data, skb->len);
- w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
- mmiowb();
+ w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len);
ndev->stats.tx_bytes += skb->len;
ndev->stats.tx_packets++;
dev_kfree_skb(skb);
w5100_command(priv, S0_CR_SEND);
+}
+
+static void w5100_tx_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ tx_work);
+ struct sk_buff *skb = priv->tx_skb;
+
+ priv->tx_skb = NULL;
+
+ if (WARN_ON(!skb))
+ return;
+ w5100_tx_skb(priv->ndev, skb);
+}
+
+static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct w5100_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ if (priv->ops->may_sleep) {
+ WARN_ON(priv->tx_skb);
+ priv->tx_skb = skb;
+ queue_work(priv->xfer_wq, &priv->tx_work);
+ } else {
+ w5100_tx_skb(ndev, skb);
+ }
return NETDEV_TX_OK;
}
-static int w5100_napi_poll(struct napi_struct *napi, int budget)
+static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
{
- struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
- struct net_device *ndev = priv->ndev;
+ struct w5100_priv *priv = netdev_priv(ndev);
struct sk_buff *skb;
- int rx_count;
u16 rx_len;
u16 offset;
u8 header[2];
+ u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv));
- for (rx_count = 0; rx_count < budget; rx_count++) {
- u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
- if (rx_buf_len == 0)
- break;
+ if (rx_buf_len == 0)
+ return NULL;
- offset = w5100_read16(priv, W5100_S0_RX_RD);
- w5100_readbuf(priv, offset, header, 2);
- rx_len = get_unaligned_be16(header) - 2;
-
- skb = netdev_alloc_skb_ip_align(ndev, rx_len);
- if (unlikely(!skb)) {
- w5100_write16(priv, W5100_S0_RX_RD,
- offset + rx_buf_len);
- w5100_command(priv, S0_CR_RECV);
- ndev->stats.rx_dropped++;
- return -ENOMEM;
- }
+ offset = w5100_read16(priv, W5100_S0_RX_RD(priv));
+ w5100_readbuf(priv, offset, header, 2);
+ rx_len = get_unaligned_be16(header) - 2;
- skb_put(skb, rx_len);
- w5100_readbuf(priv, offset + 2, skb->data, rx_len);
- w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
- mmiowb();
+ skb = netdev_alloc_skb_ip_align(ndev, rx_len);
+ if (unlikely(!skb)) {
+ w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len);
w5100_command(priv, S0_CR_RECV);
- skb->protocol = eth_type_trans(skb, ndev);
+ ndev->stats.rx_dropped++;
+ return NULL;
+ }
+
+ skb_put(skb, rx_len);
+ w5100_readbuf(priv, offset + 2, skb->data, rx_len);
+ w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len);
+ w5100_command(priv, S0_CR_RECV);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += rx_len;
+
+ return skb;
+}
+
+static void w5100_rx_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ rx_work);
+ struct sk_buff *skb;
+
+ while ((skb = w5100_rx_skb(priv->ndev)))
+ netif_rx_ni(skb);
+
+ w5100_enable_intr(priv);
+}
+
+static int w5100_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
+ int rx_count;
+
+ for (rx_count = 0; rx_count < budget; rx_count++) {
+ struct sk_buff *skb = w5100_rx_skb(priv->ndev);
- netif_receive_skb(skb);
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += rx_len;
+ if (skb)
+ netif_receive_skb(skb);
+ else
+ break;
}
if (rx_count < budget) {
napi_complete(napi);
- w5100_write(priv, W5100_IMR, IR_S0);
- mmiowb();
+ w5100_enable_intr(priv);
}
return rx_count;
@@ -511,11 +928,10 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
struct net_device *ndev = ndev_instance;
struct w5100_priv *priv = netdev_priv(ndev);
- int ir = w5100_read(priv, W5100_S0_IR);
+ int ir = w5100_read(priv, W5100_S0_IR(priv));
if (!ir)
return IRQ_NONE;
- w5100_write(priv, W5100_S0_IR, ir);
- mmiowb();
+ w5100_write(priv, W5100_S0_IR(priv), ir);
if (ir & S0_IR_SENDOK) {
netif_dbg(priv, tx_done, ndev, "tx done\n");
@@ -523,11 +939,12 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
}
if (ir & S0_IR_RECV) {
- if (napi_schedule_prep(&priv->napi)) {
- w5100_write(priv, W5100_IMR, 0);
- mmiowb();
+ w5100_disable_intr(priv);
+
+ if (priv->ops->may_sleep)
+ queue_work(priv->xfer_wq, &priv->rx_work);
+ else if (napi_schedule_prep(&priv->napi))
__napi_schedule(&priv->napi);
- }
}
return IRQ_HANDLED;
@@ -551,6 +968,14 @@ static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
return IRQ_HANDLED;
}
+static void w5100_setrx_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ setrx_work);
+
+ w5100_hw_start(priv);
+}
+
static void w5100_set_rx_mode(struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
@@ -558,7 +983,11 @@ static void w5100_set_rx_mode(struct net_device *ndev)
if (priv->promisc != set_promisc) {
priv->promisc = set_promisc;
- w5100_hw_start(priv);
+
+ if (priv->ops->may_sleep)
+ schedule_work(&priv->setrx_work);
+ else
+ w5100_hw_start(priv);
}
}
@@ -620,95 +1049,100 @@ static const struct net_device_ops w5100_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static int w5100_hw_probe(struct platform_device *pdev)
+static int w5100_mmio_probe(struct platform_device *pdev)
{
struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct w5100_priv *priv = netdev_priv(ndev);
- const char *name = netdev_name(ndev);
+ const void *mac_addr = NULL;
struct resource *mem;
- int mem_size;
+ const struct w5100_ops *ops;
int irq;
- int ret;
- if (data && is_valid_ether_addr(data->mac_addr)) {
- memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
- } else {
- eth_hw_addr_random(ndev);
- }
+ if (data && is_valid_ether_addr(data->mac_addr))
+ mac_addr = data->mac_addr;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
-
- mem_size = resource_size(mem);
-
- spin_lock_init(&priv->reg_lock);
- priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
- if (priv->indirect) {
- priv->read = w5100_read_indirect;
- priv->write = w5100_write_indirect;
- priv->read16 = w5100_read16_indirect;
- priv->write16 = w5100_write16_indirect;
- priv->readbuf = w5100_readbuf_indirect;
- priv->writebuf = w5100_writebuf_indirect;
- } else {
- priv->read = w5100_read_direct;
- priv->write = w5100_write_direct;
- priv->read16 = w5100_read16_direct;
- priv->write16 = w5100_write16_direct;
- priv->readbuf = w5100_readbuf_direct;
- priv->writebuf = w5100_writebuf_direct;
- }
-
- w5100_hw_reset(priv);
- if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
- return -ENODEV;
+ if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
+ ops = &w5100_mmio_indirect_ops;
+ else
+ ops = &w5100_mmio_direct_ops;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- ret = request_irq(irq, w5100_interrupt,
- IRQ_TYPE_LEVEL_LOW, name, ndev);
- if (ret < 0)
- return ret;
- priv->irq = irq;
- priv->link_gpio = data ? data->link_gpio : -EINVAL;
- if (gpio_is_valid(priv->link_gpio)) {
- char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
- if (!link_name)
- return -ENOMEM;
- snprintf(link_name, 16, "%s-link", name);
- priv->link_irq = gpio_to_irq(priv->link_gpio);
- if (request_any_context_irq(priv->link_irq, w5100_detect_link,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- link_name, priv->ndev) < 0)
- priv->link_gpio = -EINVAL;
- }
+ return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv),
+ mac_addr, irq, data ? data->link_gpio : -EINVAL);
+}
- netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
- return 0;
+static int w5100_mmio_remove(struct platform_device *pdev)
+{
+ return w5100_remove(&pdev->dev);
+}
+
+void *w5100_ops_priv(const struct net_device *ndev)
+{
+ return netdev_priv(ndev) +
+ ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN);
}
+EXPORT_SYMBOL_GPL(w5100_ops_priv);
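/* The allocation in w5100_probe() below lays the private area out as
 * [struct w5100_priv][pad to NETDEV_ALIGN][bus ops private area], with
 * NETDEV_ALIGN - 1 bytes of slack, so the helper above always returns
 * an aligned pointer just past the core private struct.
 */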
-static int w5100_probe(struct platform_device *pdev)
+int w5100_probe(struct device *dev, const struct w5100_ops *ops,
+ int sizeof_ops_priv, const void *mac_addr, int irq,
+ int link_gpio)
{
struct w5100_priv *priv;
struct net_device *ndev;
int err;
+ size_t alloc_size;
- ndev = alloc_etherdev(sizeof(*priv));
+ alloc_size = sizeof(*priv);
+ if (sizeof_ops_priv) {
+ alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
+ alloc_size += sizeof_ops_priv;
+ }
+ alloc_size += NETDEV_ALIGN - 1;
+
+ ndev = alloc_etherdev(alloc_size);
if (!ndev)
return -ENOMEM;
- SET_NETDEV_DEV(ndev, &pdev->dev);
- platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ dev_set_drvdata(dev, ndev);
priv = netdev_priv(ndev);
+
+ switch (ops->chip_id) {
+ case W5100:
+ priv->s0_regs = W5100_S0_REGS;
+ priv->s0_tx_buf = W5100_TX_MEM_START;
+ priv->s0_tx_buf_size = W5100_TX_MEM_SIZE;
+ priv->s0_rx_buf = W5100_RX_MEM_START;
+ priv->s0_rx_buf_size = W5100_RX_MEM_SIZE;
+ break;
+ case W5200:
+ priv->s0_regs = W5200_S0_REGS;
+ priv->s0_tx_buf = W5200_TX_MEM_START;
+ priv->s0_tx_buf_size = W5200_TX_MEM_SIZE;
+ priv->s0_rx_buf = W5200_RX_MEM_START;
+ priv->s0_rx_buf_size = W5200_RX_MEM_SIZE;
+ break;
+ case W5500:
+ priv->s0_regs = W5500_S0_REGS;
+ priv->s0_tx_buf = W5500_TX_MEM_START;
+ priv->s0_tx_buf_size = W5500_TX_MEM_SIZE;
+ priv->s0_rx_buf = W5500_RX_MEM_START;
+ priv->s0_rx_buf_size = W5500_RX_MEM_SIZE;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_register;
+ }
+
priv->ndev = ndev;
+ priv->ops = ops;
+ priv->irq = irq;
+ priv->link_gpio = link_gpio;
ndev->netdev_ops = &w5100_netdev_ops;
ndev->ethtool_ops = &w5100_ethtool_ops;
- ndev->watchdog_timeo = HZ;
netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
/* This chip doesn't support VLAN packets with normal MTU,
@@ -720,22 +1154,76 @@ static int w5100_probe(struct platform_device *pdev)
if (err < 0)
goto err_register;
- err = w5100_hw_probe(pdev);
- if (err < 0)
- goto err_hw_probe;
+ priv->xfer_wq = create_workqueue(netdev_name(ndev));
+ if (!priv->xfer_wq) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ INIT_WORK(&priv->rx_work, w5100_rx_work);
+ INIT_WORK(&priv->tx_work, w5100_tx_work);
+ INIT_WORK(&priv->setrx_work, w5100_setrx_work);
+ INIT_WORK(&priv->restart_work, w5100_restart_work);
+
+ if (mac_addr)
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ else
+ eth_hw_addr_random(ndev);
+
+ if (priv->ops->init) {
+ err = priv->ops->init(priv->ndev);
+ if (err)
+ goto err_hw;
+ }
+
+ err = w5100_hw_reset(priv);
+ if (err)
+ goto err_hw;
+
+ if (ops->may_sleep) {
+ err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ netdev_name(ndev), ndev);
+ } else {
+ err = request_irq(priv->irq, w5100_interrupt,
+ IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
+ }
+ if (err)
+ goto err_hw;
+
+ if (gpio_is_valid(priv->link_gpio)) {
+ char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL);
+
+ if (!link_name) {
+ err = -ENOMEM;
+ goto err_gpio;
+ }
+ snprintf(link_name, 16, "%s-link", netdev_name(ndev));
+ priv->link_irq = gpio_to_irq(priv->link_gpio);
+ if (request_any_context_irq(priv->link_irq, w5100_detect_link,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ link_name, priv->ndev) < 0)
+ priv->link_gpio = -EINVAL;
+ }
return 0;
-err_hw_probe:
+err_gpio:
+ free_irq(priv->irq, ndev);
+err_hw:
+ destroy_workqueue(priv->xfer_wq);
+err_wq:
unregister_netdev(ndev);
err_register:
free_netdev(ndev);
return err;
}
+EXPORT_SYMBOL_GPL(w5100_probe);
-static int w5100_remove(struct platform_device *pdev)
+int w5100_remove(struct device *dev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
w5100_hw_reset(priv);
@@ -743,16 +1231,21 @@ static int w5100_remove(struct platform_device *pdev)
if (gpio_is_valid(priv->link_gpio))
free_irq(priv->link_irq, ndev);
+ flush_work(&priv->setrx_work);
+ flush_work(&priv->restart_work);
+ flush_workqueue(priv->xfer_wq);
+ destroy_workqueue(priv->xfer_wq);
+
unregister_netdev(ndev);
free_netdev(ndev);
return 0;
}
+EXPORT_SYMBOL_GPL(w5100_remove);
#ifdef CONFIG_PM_SLEEP
static int w5100_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
if (netif_running(ndev)) {
@@ -766,8 +1259,7 @@ static int w5100_suspend(struct device *dev)
static int w5100_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
if (netif_running(ndev)) {
@@ -783,15 +1275,15 @@ static int w5100_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+EXPORT_SYMBOL_GPL(w5100_pm_ops);
-static struct platform_driver w5100_driver = {
+static struct platform_driver w5100_mmio_driver = {
.driver = {
.name = DRV_NAME,
.pm = &w5100_pm_ops,
},
- .probe = w5100_probe,
- .remove = w5100_remove,
+ .probe = w5100_mmio_probe,
+ .remove = w5100_mmio_remove,
};
-
-module_platform_driver(w5100_driver);
+module_platform_driver(w5100_mmio_driver);
diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h
new file mode 100644
index 000000000..17983a3b8
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100.h
@@ -0,0 +1,37 @@
+/*
+ * Ethernet driver for the WIZnet W5100 chip.
+ *
+ * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+enum {
+ W5100,
+ W5200,
+ W5500,
+};
+
+struct w5100_ops {
+ bool may_sleep;
+ int chip_id;
+ int (*read)(struct net_device *ndev, u32 addr);
+ int (*write)(struct net_device *ndev, u32 addr, u8 data);
+ int (*read16)(struct net_device *ndev, u32 addr);
+ int (*write16)(struct net_device *ndev, u32 addr, u16 data);
+ int (*readbulk)(struct net_device *ndev, u32 addr, u8 *buf, int len);
+ int (*writebulk)(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len);
+ int (*reset)(struct net_device *ndev);
+ int (*init)(struct net_device *ndev);
+};
+
+void *w5100_ops_priv(const struct net_device *ndev);
+
+int w5100_probe(struct device *dev, const struct w5100_ops *ops,
+ int sizeof_ops_priv, const void *mac_addr, int irq,
+ int link_gpio);
+int w5100_remove(struct device *dev);
+
+extern const struct dev_pm_ops w5100_pm_ops;
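This header is the entire contract between the new core and a bus backend: the backend fills in a w5100_ops table, sizes its own private area, and hands both to w5100_probe(). A minimal sketch of such glue, assuming a hypothetical bus with foo_bus_read8()/foo_bus_write8() helpers (every foo_* name is illustrative, not part of this patch or of any in-tree driver):

#include <linux/netdevice.h>
#include "w5100.h"

struct foo_w5100_priv {
	struct foo_bus_device *bus;	/* hypothetical bus handle */
};

static int foo_w5100_read(struct net_device *ndev, u32 addr)
{
	struct foo_w5100_priv *p = w5100_ops_priv(ndev);

	return foo_bus_read8(p->bus, addr);	/* assumed bus helper */
}

static int foo_w5100_write(struct net_device *ndev, u32 addr, u8 data)
{
	struct foo_w5100_priv *p = w5100_ops_priv(ndev);

	return foo_bus_write8(p->bus, addr, data);	/* assumed bus helper */
}

/* read16/write16/readbulk/writebulk would follow the same shape. */

static const struct w5100_ops foo_w5100_ops = {
	.may_sleep	= true,	/* transfers sleep: core defers to work items */
	.chip_id	= W5500,
	.read		= foo_w5100_read,
	.write		= foo_w5100_write,
	/* ... remaining accessors elided ... */
};

static int foo_w5100_probe(struct foo_bus_device *bus)
{
	/* NULL MAC address: the core falls back to eth_hw_addr_random() */
	return w5100_probe(&bus->dev, &foo_w5100_ops,
			   sizeof(struct foo_w5100_priv), NULL,
			   bus->irq, -EINVAL /* no link GPIO */);
}

The reworked MMIO probe above follows exactly this shape, picking w5100_mmio_direct_ops or w5100_mmio_indirect_ops based on the size of the memory resource.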
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 8da7b930f..0b37ce9f2 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -362,7 +362,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
w5300_hw_reset(priv);
w5300_hw_start(priv);
ndev->stats.tx_errors++;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
netif_wake_queue(ndev);
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 5a1068df7..739708712 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -584,7 +584,7 @@ static void temac_device_reset(struct net_device *ndev)
dev_err(&ndev->dev, "Error setting TEMAC options\n");
/* Init Driver variable */
- ndev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(ndev); /* prevent tx timeout */
}
static void temac_adjust_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 468464470..8c7f5be51 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -508,7 +508,7 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_set_multicast_list(ndev);
axienet_setoptions(ndev, lp->options);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
}
/**
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index e324b3092..3cee84a24 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -531,7 +531,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
}
/* To exclude tx timeout */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* We're all ready to go. Start the queue */
netif_wake_queue(dev);
@@ -563,7 +563,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
dev->stats.tx_bytes += lp->deferred_skb->len;
dev_kfree_skb_irq(lp->deferred_skb);
lp->deferred_skb = NULL;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
}
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index d56f86932..7b44968e0 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1199,7 +1199,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
struct net_device *dev = local->dev;
/* reset the card */
do_reset(dev,1);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/fddi/skfp/Makefile b/drivers/net/fddi/skfp/Makefile
index b0be0234a..a957a1c7e 100644
--- a/drivers/net/fddi/skfp/Makefile
+++ b/drivers/net/fddi/skfp/Makefile
@@ -17,4 +17,4 @@ skfp-objs := skfddi.o hwmtm.o fplustm.o smt.o cfm.o \
# projects. To keep the source common for all those drivers (and
# thus simplify fixes to it), please do not clean it up!
-ccflags-y := -Idrivers/net/skfp -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
+ccflags-y := -DPCI -DMEM_MAPPED_IO -Wno-strict-prototypes
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b103adb8d..0dbafedc0 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -179,6 +179,8 @@ void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
for (i = 0; i < EP_BUFFER_SUPPORT_VLAN_MAX; i++)
info->v1i.vlan_id[i] = vlan_id[i];
+
+ info->v1i.rx_status |= FJES_RX_MTU_CHANGING_DONE;
}
void
@@ -214,6 +216,7 @@ static int fjes_hw_setup(struct fjes_hw *hw)
u8 mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
struct fjes_device_command_param param;
struct ep_share_mem_info *buf_pair;
+ unsigned long flags;
size_t mem_size;
int result;
int epidx;
@@ -262,10 +265,12 @@ static int fjes_hw_setup(struct fjes_hw *hw)
if (result)
return result;
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, mac,
fjes_support_mtu[0]);
fjes_hw_setup_epbuf(&buf_pair->rx, mac,
fjes_support_mtu[0]);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
}
@@ -327,6 +332,7 @@ int fjes_hw_init(struct fjes_hw *hw)
INIT_WORK(&hw->epstop_task, fjes_hw_epstop_task);
mutex_init(&hw->hw_info.lock);
+ spin_lock_init(&hw->rx_status_lock);
hw->max_epid = fjes_hw_get_max_epid(hw);
hw->my_epid = fjes_hw_get_my_epid(hw);
@@ -734,6 +740,7 @@ fjes_hw_get_partner_ep_status(struct fjes_hw *hw, int epid)
void fjes_hw_raise_epstop(struct fjes_hw *hw)
{
enum ep_partner_status status;
+ unsigned long flags;
int epidx;
for (epidx = 0; epidx < hw->max_epid; epidx++) {
@@ -753,8 +760,10 @@ void fjes_hw_raise_epstop(struct fjes_hw *hw)
set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
set_bit(epidx, &hw->txrx_stop_req_bit);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_REQUEST;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
}
@@ -810,7 +819,8 @@ bool fjes_hw_check_mtu(struct epbuf_handler *epbh, u32 mtu)
{
union ep_buffer_info *info = epbh->info;
- return (info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu));
+ return ((info->v1i.frame_max == FJES_MTU_TO_FRAME_SIZE(mtu)) &&
+ info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE);
}
bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
@@ -863,6 +873,9 @@ bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
{
union ep_buffer_info *info = epbh->info;
+ if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+ return true;
+
if (info->v1i.count_max == 0)
return true;
@@ -932,6 +945,7 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
struct fjes_adapter *adapter;
struct net_device *netdev;
+ unsigned long flags;
ulong unshare_bit = 0;
ulong share_bit = 0;
@@ -1024,8 +1038,10 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
continue;
if (test_bit(epidx, &share_bit)) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
mutex_lock(&hw->hw_info.lock);
@@ -1069,10 +1085,14 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
mutex_unlock(&hw->hw_info.lock);
- if (ret == 0)
+ if (ret == 0) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(
&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
+ }
}
if (test_bit(epidx, &irq_bit)) {
@@ -1080,9 +1100,11 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
REG_ICTL_MASK_TXRX_STOP_REQ);
set_bit(epidx, &hw->txrx_stop_req_bit);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.
info->v1i.rx_status |=
FJES_RX_STOP_REQ_REQUEST;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
set_bit(epidx, &hw->hw_info.buffer_unshare_reserve_bit);
}
}
@@ -1098,6 +1120,7 @@ static void fjes_hw_epstop_task(struct work_struct *work)
{
struct fjes_hw *hw = container_of(work, struct fjes_hw, epstop_task);
struct fjes_adapter *adapter = (struct fjes_adapter *)hw->back;
+ unsigned long flags;
ulong remain_bit;
int epid_bit;
@@ -1105,9 +1128,12 @@ static void fjes_hw_epstop_task(struct work_struct *work)
while ((remain_bit = hw->epstop_req_bit)) {
for (epid_bit = 0; remain_bit; remain_bit >>= 1, epid_bit++) {
if (remain_bit & 1) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epid_bit].
tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
clear_bit(epid_bit, &hw->epstop_req_bit);
set_bit(epid_bit,
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
index 6d57b89a0..1445ac99d 100644
--- a/drivers/net/fjes/fjes_hw.h
+++ b/drivers/net/fjes/fjes_hw.h
@@ -33,9 +33,9 @@ struct fjes_hw;
#define EP_BUFFER_SUPPORT_VLAN_MAX 4
#define EP_BUFFER_INFO_SIZE 4096
-#define FJES_DEVICE_RESET_TIMEOUT ((17 + 1) * 3) /* sec */
-#define FJES_COMMAND_REQ_TIMEOUT (5 + 1) /* sec */
-#define FJES_COMMAND_REQ_BUFF_TIMEOUT (8 * 3) /* sec */
+#define FJES_DEVICE_RESET_TIMEOUT ((17 + 1) * 3 * 8) /* sec */
+#define FJES_COMMAND_REQ_TIMEOUT ((5 + 1) * 3 * 8) /* sec */
+#define FJES_COMMAND_REQ_BUFF_TIMEOUT (60 * 3) /* sec */
#define FJES_COMMAND_EPSTOP_WAIT_TIMEOUT (1) /* sec */
#define FJES_CMD_REQ_ERR_INFO_PARAM (0x0001)
@@ -57,6 +57,7 @@ struct fjes_hw;
#define FJES_RX_STOP_REQ_DONE (0x1)
#define FJES_RX_STOP_REQ_REQUEST (0x2)
#define FJES_RX_POLL_WORK (0x4)
+#define FJES_RX_MTU_CHANGING_DONE (0x8)
#define EP_BUFFER_SIZE \
(((sizeof(union ep_buffer_info) + (128 * (64 * 1024))) \
@@ -299,6 +300,8 @@ struct fjes_hw {
u8 *base;
struct fjes_hw_info hw_info;
+
+ spinlock_t rx_status_lock; /* spinlock for rx_status */
};
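/* Every writer of ep_shm_info[].tx.info->v1i.rx_status across fjes_hw.c
 * and fjes_main.c now takes rx_status_lock (the irqsave variants from
 * process context, plain spin_lock from the NAPI poll loop), closing the
 * races between the MTU-change, unshare/epstop and polling paths.
 */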
int fjes_hw_init(struct fjes_hw *);
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 0ddb54fe3..86c331bb5 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -29,7 +29,7 @@
#include "fjes.h"
#define MAJ 1
-#define MIN 0
+#define MIN 1
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
#define DRV_NAME "fjes"
char fjes_driver_name[] = DRV_NAME;
@@ -290,6 +290,7 @@ static int fjes_close(struct net_device *netdev)
{
struct fjes_adapter *adapter = netdev_priv(netdev);
struct fjes_hw *hw = &adapter->hw;
+ unsigned long flags;
int epidx;
netif_tx_stop_all_queues(netdev);
@@ -299,13 +300,18 @@ static int fjes_close(struct net_device *netdev)
napi_disable(&adapter->napi);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
- adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status &=
- ~FJES_RX_POLL_WORK;
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx]
+ .tx.info->v1i.rx_status &=
+ ~FJES_RX_POLL_WORK;
}
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
fjes_free_irq(adapter);
@@ -330,6 +336,7 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct ep_share_mem_info *buf_pair;
struct fjes_hw *hw = &adapter->hw;
+ unsigned long flags;
int result;
int epidx;
@@ -371,8 +378,10 @@ static int fjes_setup_resources(struct fjes_adapter *adapter)
buf_pair = &hw->ep_shm_info[epidx];
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx, netdev->dev_addr,
netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
if (fjes_hw_epid_is_same_zone(hw, epidx)) {
mutex_lock(&hw->hw_info.lock);
@@ -402,6 +411,7 @@ static void fjes_free_resources(struct fjes_adapter *adapter)
struct ep_share_mem_info *buf_pair;
struct fjes_hw *hw = &adapter->hw;
bool reset_flag = false;
+ unsigned long flags;
int result;
int epidx;
@@ -418,8 +428,10 @@ static void fjes_free_resources(struct fjes_adapter *adapter)
buf_pair = &hw->ep_shm_info[epidx];
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&buf_pair->tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
}
@@ -459,7 +471,7 @@ static void fjes_tx_stall_task(struct work_struct *work)
int i;
if (((long)jiffies -
- (long)(netdev->trans_start)) > FJES_TX_TX_STALL_TIMEOUT) {
+ dev_trans_start(netdev)) > FJES_TX_TX_STALL_TIMEOUT) {
netif_wake_queue(netdev);
return;
}
@@ -481,6 +493,9 @@ static void fjes_tx_stall_task(struct work_struct *work)
info = adapter->hw.ep_shm_info[epid].tx.info;
+ if (!(info->v1i.rx_status & FJES_RX_MTU_CHANGING_DONE))
+ return;
+
if (EP_RING_FULL(info->v1i.head, info->v1i.tail,
info->v1i.count_max)) {
all_queue_available = 0;
@@ -549,7 +564,8 @@ static void fjes_raise_intr_rxdata_task(struct work_struct *work)
if ((hw->ep_shm_info[epid].tx_status_work ==
FJES_TX_DELAY_SEND_PENDING) &&
(pstatus == EP_PARTNER_SHARED) &&
- !(hw->ep_shm_info[epid].rx.info->v1i.rx_status)) {
+ !(hw->ep_shm_info[epid].rx.info->v1i.rx_status &
+ FJES_RX_POLL_WORK)) {
fjes_hw_raise_interrupt(hw, epid,
REG_ICTL_MASK_RX_DATA);
}
@@ -653,7 +669,7 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
/* version is NOT 0 */
adapter->stats64.tx_carrier_errors += 1;
- hw->ep_shm_info[my_epid].net_stats
+ hw->ep_shm_info[dest_epid].net_stats
.tx_carrier_errors += 1;
ret = NETDEV_TX_OK;
@@ -661,9 +677,9 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
&adapter->hw.ep_shm_info[dest_epid].rx,
netdev->mtu)) {
adapter->stats64.tx_dropped += 1;
- hw->ep_shm_info[my_epid].net_stats.tx_dropped += 1;
+ hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
adapter->stats64.tx_errors += 1;
- hw->ep_shm_info[my_epid].net_stats.tx_errors += 1;
+ hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
ret = NETDEV_TX_OK;
} else if (vlan &&
@@ -694,15 +710,15 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
(long)adapter->tx_start_jiffies) >=
FJES_TX_RETRY_TIMEOUT) {
adapter->stats64.tx_fifo_errors += 1;
- hw->ep_shm_info[my_epid].net_stats
+ hw->ep_shm_info[dest_epid].net_stats
.tx_fifo_errors += 1;
adapter->stats64.tx_errors += 1;
- hw->ep_shm_info[my_epid].net_stats
+ hw->ep_shm_info[dest_epid].net_stats
.tx_errors += 1;
ret = NETDEV_TX_OK;
} else {
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
netif_tx_stop_queue(cur_queue);
if (!work_pending(&adapter->tx_stall_task))
@@ -714,10 +730,10 @@ fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
} else {
if (!is_multi) {
adapter->stats64.tx_packets += 1;
- hw->ep_shm_info[my_epid].net_stats
+ hw->ep_shm_info[dest_epid].net_stats
.tx_packets += 1;
adapter->stats64.tx_bytes += len;
- hw->ep_shm_info[my_epid].net_stats
+ hw->ep_shm_info[dest_epid].net_stats
.tx_bytes += len;
}
@@ -759,9 +775,12 @@ fjes_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
+ struct fjes_adapter *adapter = netdev_priv(netdev);
bool running = netif_running(netdev);
- int ret = 0;
- int idx;
+ struct fjes_hw *hw = &adapter->hw;
+ unsigned long flags;
+ int ret = -EINVAL;
+ int idx, epidx;
for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
if (new_mtu <= fjes_support_mtu[idx]) {
@@ -769,19 +788,58 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
if (new_mtu == netdev->mtu)
return 0;
- if (running)
- fjes_close(netdev);
+ ret = 0;
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ if (running) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
+ hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
+ ~FJES_RX_MTU_CHANGING_DONE;
+ }
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
+
+ netif_tx_stop_all_queues(netdev);
+ netif_carrier_off(netdev);
+ cancel_work_sync(&adapter->tx_stall_task);
+ napi_disable(&adapter->napi);
+
+ msleep(1000);
+
+ netif_tx_stop_all_queues(netdev);
+ }
+
+ netdev->mtu = new_mtu;
- netdev->mtu = new_mtu;
+ if (running) {
+ for (epidx = 0; epidx < hw->max_epid; epidx++) {
+ if (epidx == hw->my_epid)
+ continue;
- if (running)
- ret = fjes_open(netdev);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
+ fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
+ netdev->dev_addr,
+ netdev->mtu);
- return ret;
+ hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
+ FJES_RX_MTU_CHANGING_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
}
+
+ netif_tx_wake_all_queues(netdev);
+ netif_carrier_on(netdev);
+ napi_enable(&adapter->napi);
+ napi_schedule(&adapter->napi);
}
- return -EINVAL;
+ return ret;
}
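/* The reworked MTU change above is a small handshake: clear
 * FJES_RX_MTU_CHANGING_DONE on every peer endpoint so partners stop
 * treating the buffers as valid, quiesce the queues and NAPI, give
 * in-flight traffic a second to drain (msleep(1000)), re-setup the
 * endpoint buffers for the new MTU, then set the flag again and restart
 * the datapath.
 */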
static int fjes_vlan_rx_add_vid(struct net_device *netdev,
@@ -825,6 +883,7 @@ static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
{
struct fjes_hw *hw = &adapter->hw;
enum ep_partner_status status;
+ unsigned long flags;
status = fjes_hw_get_partner_ep_status(hw, src_epid);
switch (status) {
@@ -834,8 +893,10 @@ static void fjes_txrx_stop_req_irq(struct fjes_adapter *adapter,
break;
case EP_PARTNER_WAITING:
if (src_epid < hw->my_epid) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(src_epid, &hw->txrx_stop_req_bit);
set_bit(src_epid, &adapter->unshare_watch_bitmask);
@@ -861,14 +922,17 @@ static void fjes_stop_req_irq(struct fjes_adapter *adapter, int src_epid)
{
struct fjes_hw *hw = &adapter->hw;
enum ep_partner_status status;
+ unsigned long flags;
set_bit(src_epid, &hw->hw_info.buffer_unshare_reserve_bit);
status = fjes_hw_get_partner_ep_status(hw, src_epid);
switch (status) {
case EP_PARTNER_WAITING:
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[src_epid].tx.info->v1i.rx_status |=
FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(src_epid, &hw->txrx_stop_req_bit);
/* fall through */
case EP_PARTNER_UNSHARE:
@@ -1001,13 +1065,17 @@ static int fjes_poll(struct napi_struct *napi, int budget)
size_t frame_len;
void *frame;
+ spin_lock(&hw->rx_status_lock);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
- adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |=
- FJES_RX_POLL_WORK;
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx]
+ .tx.info->v1i.rx_status |= FJES_RX_POLL_WORK;
}
+ spin_unlock(&hw->rx_status_lock);
while (work_done < budget) {
prefetch(&adapter->hw);
@@ -1065,13 +1133,17 @@ static int fjes_poll(struct napi_struct *napi, int budget)
if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
napi_reschedule(napi);
} else {
+ spin_lock(&hw->rx_status_lock);
for (epidx = 0; epidx < hw->max_epid; epidx++) {
if (epidx == hw->my_epid)
continue;
- adapter->hw.ep_shm_info[epidx]
- .tx.info->v1i.rx_status &=
+ if (fjes_hw_get_partner_ep_status(hw, epidx) ==
+ EP_PARTNER_SHARED)
+ adapter->hw.ep_shm_info[epidx].tx
+ .info->v1i.rx_status &=
~FJES_RX_POLL_WORK;
}
+ spin_unlock(&hw->rx_status_lock);
fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
}
@@ -1129,7 +1201,7 @@ static int fjes_probe(struct platform_device *plat_dev)
res = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
hw->hw_res.start = res->start;
- hw->hw_res.size = res->end - res->start + 1;
+ hw->hw_res.size = resource_size(res);
hw->hw_res.irq = platform_get_irq(plat_dev, 0);
err = fjes_hw_init(&adapter->hw);
if (err)
@@ -1203,7 +1275,7 @@ static void fjes_netdev_setup(struct net_device *netdev)
netdev->watchdog_timeo = FJES_TX_RETRY_INTERVAL;
netdev->netdev_ops = &fjes_netdev_ops;
fjes_set_ethtool_ops(netdev);
- netdev->mtu = fjes_support_mtu[0];
+ netdev->mtu = fjes_support_mtu[3];
netdev->flags |= IFF_BROADCAST;
netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER;
}
@@ -1240,6 +1312,7 @@ static void fjes_watch_unshare_task(struct work_struct *work)
int max_epid, my_epid, epidx;
int stop_req, stop_req_done;
ulong unshare_watch_bitmask;
+ unsigned long flags;
int wait_time = 0;
int is_shared;
int ret;
@@ -1292,8 +1365,10 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
mutex_unlock(&hw->hw_info.lock);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock, flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
clear_bit(epidx, &unshare_watch_bitmask);
@@ -1331,9 +1406,12 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
mutex_unlock(&hw->hw_info.lock);
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
fjes_hw_setup_epbuf(
&hw->ep_shm_info[epidx].tx,
netdev->dev_addr, netdev->mtu);
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
clear_bit(epidx, &hw->txrx_stop_req_bit);
clear_bit(epidx, &unshare_watch_bitmask);
@@ -1341,8 +1419,11 @@ static void fjes_watch_unshare_task(struct work_struct *work)
}
if (test_bit(epidx, &unshare_watch_bitmask)) {
+ spin_lock_irqsave(&hw->rx_status_lock, flags);
hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
~FJES_RX_STOP_REQ_DONE;
+ spin_unlock_irqrestore(&hw->rx_status_lock,
+ flags);
}
}
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index c70e51567..9b3dc3c61 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -87,7 +87,6 @@ struct geneve_sock {
struct socket *sock;
struct rcu_head rcu;
int refcnt;
- struct udp_offload udp_offloads;
struct hlist_head vni_list[VNI_HASH_SIZE];
u32 flags;
};
@@ -405,14 +404,6 @@ static void geneve_notify_add_rx_port(struct geneve_sock *gs)
struct net *net = sock_net(sk);
sa_family_t sa_family = geneve_get_sk_family(gs);
__be16 port = inet_sk(sk)->inet_sport;
- int err;
-
- if (sa_family == AF_INET) {
- err = udp_add_offload(sock_net(sk), &gs->udp_offloads);
- if (err)
- pr_warn("geneve: udp_add_offload failed with status %d\n",
- err);
- }
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -428,9 +419,9 @@ static int geneve_hlen(struct genevehdr *gh)
return sizeof(*gh) + gh->opt_len * 4;
}
-static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
- struct sk_buff *skb,
- struct udp_offload *uoff)
+static struct sk_buff **geneve_gro_receive(struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
{
struct sk_buff *p, **pp = NULL;
struct genevehdr *gh, *gh2;
@@ -491,8 +482,8 @@ out:
return pp;
}
-static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
- struct udp_offload *uoff)
+static int geneve_gro_complete(struct sock *sk, struct sk_buff *skb,
+ int nhoff)
{
struct genevehdr *gh;
struct packet_offload *ptype;
@@ -542,14 +533,14 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port,
INIT_HLIST_HEAD(&gs->vni_list[h]);
/* Initialize the geneve udp offloads structure */
- gs->udp_offloads.port = port;
- gs->udp_offloads.callbacks.gro_receive = geneve_gro_receive;
- gs->udp_offloads.callbacks.gro_complete = geneve_gro_complete;
geneve_notify_add_rx_port(gs);
/* Mark socket as an encapsulation socket */
+ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
tunnel_cfg.sk_user_data = gs;
tunnel_cfg.encap_type = 1;
+ tunnel_cfg.gro_receive = geneve_gro_receive;
+ tunnel_cfg.gro_complete = geneve_gro_complete;
tunnel_cfg.encap_rcv = geneve_udp_encap_recv;
tunnel_cfg.encap_destroy = NULL;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
@@ -573,9 +564,6 @@ static void geneve_notify_del_rx_port(struct geneve_sock *gs)
}
rcu_read_unlock();
-
- if (sa_family == AF_INET)
- udp_del_offload(&gs->udp_offloads);
}
static void __geneve_sock_release(struct geneve_sock *gs)
@@ -705,16 +693,12 @@ static int geneve_build_skb(struct rtable *rt, struct sk_buff *skb,
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr);
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
+ if (unlikely(err))
goto free_rt;
- }
- skb = udp_tunnel_handle_offloads(skb, udp_sum);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
+ err = udp_tunnel_handle_offloads(skb, udp_sum);
+ if (err)
goto free_rt;
- }
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -742,16 +726,12 @@ static int geneve6_build_skb(struct dst_entry *dst, struct sk_buff *skb,
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
+ if (unlikely(err))
goto free_dst;
- }
- skb = udp_tunnel_handle_offloads(skb, udp_sum);
- if (IS_ERR(skb)) {
- err = PTR_ERR(skb);
+ err = udp_tunnel_handle_offloads(skb, udp_sum);
+ if (err)
goto free_dst;
- }
gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
@@ -946,7 +926,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
err = geneve_build_skb(rt, skb, key->tun_flags, vni,
info->options_len, opts, flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
ttl = key->ttl;
@@ -955,7 +935,7 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
err = geneve_build_skb(rt, skb, 0, geneve->vni,
0, NULL, flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
ttl = geneve->ttl;
@@ -973,13 +953,13 @@ static netdev_tx_t geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
tx_error:
dev_kfree_skb(skb);
-err:
+
if (err == -ELOOP)
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
- else
- dev->stats.tx_errors++;
+
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
@@ -1035,7 +1015,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
info->options_len, opts,
flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
prio = ip_tunnel_ecn_encap(key->tos, iip, skb);
ttl = key->ttl;
@@ -1044,7 +1024,7 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
err = geneve6_build_skb(dst, skb, 0, geneve->vni,
0, NULL, flags, xnet);
if (unlikely(err))
- goto err;
+ goto tx_error;
prio = ip_tunnel_ecn_encap(ip6_tclass(fl6.flowlabel),
iip, skb);
@@ -1063,13 +1043,13 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
tx_error:
dev_kfree_skb(skb);
-err:
+
if (err == -ELOOP)
dev->stats.collisions++;
else if (err == -ENETUNREACH)
dev->stats.tx_carrier_errors++;
- else
- dev->stats.tx_errors++;
+
+ dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
#endif
@@ -1194,7 +1174,7 @@ static struct device_type geneve_type = {
* supply the listening GENEVE udp ports. Callers are expected
* to implement the ndo_add_geneve_port.
*/
-void geneve_get_rx_port(struct net_device *dev)
+static void geneve_push_rx_ports(struct net_device *dev)
{
struct net *net = dev_net(dev);
struct geneve_net *gn = net_generic(net, geneve_net_id);
@@ -1203,6 +1183,9 @@ void geneve_get_rx_port(struct net_device *dev)
struct sock *sk;
__be16 port;
+ if (!dev->netdev_ops->ndo_add_geneve_port)
+ return;
+
rcu_read_lock();
list_for_each_entry_rcu(gs, &gn->sock_list, list) {
sk = gs->sock->sk;
@@ -1212,7 +1195,6 @@ void geneve_get_rx_port(struct net_device *dev)
}
rcu_read_unlock();
}
-EXPORT_SYMBOL_GPL(geneve_get_rx_port);
/* Initialize the device structure. */
static void geneve_setup(struct net_device *dev)
@@ -1531,6 +1513,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
{
struct nlattr *tb[IFLA_MAX + 1];
struct net_device *dev;
+ LIST_HEAD(list_kill);
int err;
memset(tb, 0, sizeof(tb));
@@ -1542,8 +1525,10 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
err = geneve_configure(net, dev, &geneve_remote_unspec,
0, 0, 0, 0, htons(dst_port), true,
GENEVE_F_UDP_ZERO_CSUM6_RX);
- if (err)
- goto err;
+ if (err) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
/* openvswitch users expect packet sizes to be unrestricted,
* so set the largest MTU we can.
@@ -1552,14 +1537,34 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
if (err)
goto err;
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0)
+ goto err;
+
return dev;
err:
- free_netdev(dev);
+ geneve_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
+static int geneve_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ if (event == NETDEV_OFFLOAD_PUSH_GENEVE)
+ geneve_push_rx_ports(dev);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block geneve_notifier_block __read_mostly = {
+ .notifier_call = geneve_netdevice_event,
+};
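/* With this notifier, offload-capable NICs no longer pull the port list
 * via the old geneve_get_rx_port() export; raising
 * NETDEV_OFFLOAD_PUSH_GENEVE for a device makes geneve_push_rx_ports()
 * replay every open Geneve UDP port through the device's
 * ndo_add_geneve_port() callback.
 */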
+
static __net_init int geneve_init_net(struct net *net)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
@@ -1612,11 +1617,18 @@ static int __init geneve_init_module(void)
if (rc)
goto out1;
- rc = rtnl_link_register(&geneve_link_ops);
+ rc = register_netdevice_notifier(&geneve_notifier_block);
if (rc)
goto out2;
+ rc = rtnl_link_register(&geneve_link_ops);
+ if (rc)
+ goto out3;
+
return 0;
+
+out3:
+ unregister_netdevice_notifier(&geneve_notifier_block);
out2:
unregister_pernet_subsys(&geneve_net_ops);
out1:
@@ -1627,6 +1639,7 @@ late_initcall(geneve_init_module);
static void __exit geneve_cleanup_module(void)
{
rtnl_link_unregister(&geneve_link_ops);
+ unregister_netdevice_notifier(&geneve_notifier_block);
unregister_pernet_subsys(&geneve_net_ops);
}
module_exit(geneve_cleanup_module);
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
new file mode 100644
index 000000000..4e976a0d5
--- /dev/null
+++ b/drivers/net/gtp.c
@@ -0,0 +1,1375 @@
+/* GTP according to GSM TS 09.60 / 3GPP TS 29.060
+ *
+ * (C) 2012-2014 by sysmocom - s.f.m.c. GmbH
+ * (C) 2016 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * Author: Harald Welte <hwelte@sysmocom.de>
+ * Pablo Neira Ayuso <pablo@netfilter.org>
+ * Andreas Schultz <aschultz@travelping.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <linux/rculist.h>
+#include <linux/jhash.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/file.h>
+#include <linux/gtp.h>
+
+#include <net/net_namespace.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/icmp.h>
+#include <net/xfrm.h>
+#include <net/genetlink.h>
+#include <net/netns/generic.h>
+#include <net/gtp.h>
+
+/* An active session for the subscriber. */
+struct pdp_ctx {
+ struct hlist_node hlist_tid;
+ struct hlist_node hlist_addr;
+
+ union {
+ u64 tid;
+ struct {
+ u64 tid;
+ u16 flow;
+ } v0;
+ struct {
+ u32 i_tei;
+ u32 o_tei;
+ } v1;
+ } u;
+ u8 gtp_version;
+ u16 af;
+
+ struct in_addr ms_addr_ip4;
+ struct in_addr sgsn_addr_ip4;
+
+ atomic_t tx_seq;
+ struct rcu_head rcu_head;
+};
+
+/* One instance of the GTP device. */
+struct gtp_dev {
+ struct list_head list;
+
+ struct socket *sock0;
+ struct socket *sock1u;
+
+ struct net *net;
+ struct net_device *dev;
+
+ unsigned int hash_size;
+ struct hlist_head *tid_hash;
+ struct hlist_head *addr_hash;
+};
+
+static int gtp_net_id __read_mostly;
+
+struct gtp_net {
+ struct list_head gtp_dev_list;
+};
+
+static u32 gtp_h_initval;
+
+static inline u32 gtp0_hashfn(u64 tid)
+{
+ u32 *tid32 = (u32 *) &tid;
+ return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
+}
+
+static inline u32 gtp1u_hashfn(u32 tid)
+{
+ return jhash_1word(tid, gtp_h_initval);
+}
+
+static inline u32 ipv4_hashfn(__be32 ip)
+{
+ return jhash_1word((__force u32)ip, gtp_h_initval);
+}
+
+/* Resolve a PDP context structure based on the 64bit TID. */
+static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
+{
+ struct hlist_head *head;
+ struct pdp_ctx *pdp;
+
+ head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
+
+ hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
+ if (pdp->gtp_version == GTP_V0 &&
+ pdp->u.v0.tid == tid)
+ return pdp;
+ }
+ return NULL;
+}
+
+/* Resolve a PDP context structure based on the 32bit TEI. */
+static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
+{
+ struct hlist_head *head;
+ struct pdp_ctx *pdp;
+
+ head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
+
+ hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
+ if (pdp->gtp_version == GTP_V1 &&
+ pdp->u.v1.i_tei == tid)
+ return pdp;
+ }
+ return NULL;
+}
+
+/* Resolve a PDP context based on IPv4 address of MS. */
+static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
+{
+ struct hlist_head *head;
+ struct pdp_ctx *pdp;
+
+ head = &gtp->addr_hash[ipv4_hashfn(ms_addr) % gtp->hash_size];
+
+ hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
+ if (pdp->af == AF_INET &&
+ pdp->ms_addr_ip4.s_addr == ms_addr)
+ return pdp;
+ }
+
+ return NULL;
+}
+
+static bool gtp_check_src_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
+ unsigned int hdrlen)
+{
+ struct iphdr *iph;
+
+ if (!pskb_may_pull(skb, hdrlen + sizeof(struct iphdr)))
+ return false;
+
+ /* The inner IP header starts right after the tunnel headers. */
+ iph = (struct iphdr *)(skb->data + hdrlen);
+
+ return iph->saddr == pctx->ms_addr_ip4.s_addr;
+}
+
+/* Check if the inner IP source address in this packet is assigned to any
+ * existing mobile subscriber.
+ */
+static bool gtp_check_src_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
+ unsigned int hdrlen)
+{
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_IP:
+ return gtp_check_src_ms_ipv4(skb, pctx, hdrlen);
+ }
+ return false;
+}
+
+/* 1 means pass up to the userspace UDP socket, -1 means drop and 0 means decapsulated. */
+static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
+ bool xnet)
+{
+ unsigned int hdrlen = sizeof(struct udphdr) +
+ sizeof(struct gtp0_header);
+ struct gtp0_header *gtp0;
+ struct pdp_ctx *pctx;
+ int ret = 0;
+
+ if (!pskb_may_pull(skb, hdrlen))
+ return -1;
+
+ gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
+
+ if ((gtp0->flags >> 5) != GTP_V0)
+ return 1;
+
+ if (gtp0->type != GTP_TPDU)
+ return 1;
+
+ rcu_read_lock();
+ pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
+ if (!pctx) {
+ netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+ ret = -1;
+ goto out_rcu;
+ }
+
+ if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
+ netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
+ ret = -1;
+ goto out_rcu;
+ }
+ rcu_read_unlock();
+
+ /* Get rid of the GTP + UDP headers. */
+ return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+out_rcu:
+ rcu_read_unlock();
+ return ret;
+}
+
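/* Both the GTPv0 and GTPv1-U receive helpers follow the convention in
 * the comment above gtp0_udp_encap_recv(): a positive return hands the
 * packet back to the userspace UDP socket (wrong version or non-T-PDU),
 * 0 means the outer headers were stripped for injection into the gtp
 * device, and -1 requests a drop.
 */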
+static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
+ bool xnet)
+{
+ unsigned int hdrlen = sizeof(struct udphdr) +
+ sizeof(struct gtp1_header);
+ struct gtp1_header *gtp1;
+ struct pdp_ctx *pctx;
+ int ret = 0;
+
+ if (!pskb_may_pull(skb, hdrlen))
+ return -1;
+
+ gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+
+ if ((gtp1->flags >> 5) != GTP_V1)
+ return 1;
+
+ if (gtp1->type != GTP_TPDU)
+ return 1;
+
+ /* From 29.060: "This field shall be present if and only if any one or
+ * more of the S, PN and E flags are set.".
+ *
+ * If any of these bits is set, then the remaining optional fields are
+ * present as well.
+ */
+ if (gtp1->flags & GTP1_F_MASK)
+ hdrlen += 4;
+
+ /* Make sure the header is large enough, including extensions. */
+ if (!pskb_may_pull(skb, hdrlen))
+ return -1;
+
+ gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
+
+ rcu_read_lock();
+ pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
+ if (!pctx) {
+ netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
+ ret = -1;
+ goto out_rcu;
+ }
+
+ if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
+ netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
+ ret = -1;
+ goto out_rcu;
+ }
+ rcu_read_unlock();
+
+ /* Get rid of the GTP + UDP headers. */
+ return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
+out_rcu:
+ rcu_read_unlock();
+ return ret;
+}
+
+static void gtp_encap_disable(struct gtp_dev *gtp)
+{
+ if (gtp->sock0 && gtp->sock0->sk) {
+ udp_sk(gtp->sock0->sk)->encap_type = 0;
+ rcu_assign_sk_user_data(gtp->sock0->sk, NULL);
+ }
+ if (gtp->sock1u && gtp->sock1u->sk) {
+ udp_sk(gtp->sock1u->sk)->encap_type = 0;
+ rcu_assign_sk_user_data(gtp->sock1u->sk, NULL);
+ }
+
+ gtp->sock0 = NULL;
+ gtp->sock1u = NULL;
+}
+
+static void gtp_encap_destroy(struct sock *sk)
+{
+ struct gtp_dev *gtp;
+
+ gtp = rcu_dereference_sk_user_data(sk);
+ if (gtp)
+ gtp_encap_disable(gtp);
+}
+
+/* UDP encapsulation receive handler. See net/ipv4/udp.c.
+ * Return codes: 0: success, <0: error, >0: pass up to userspace UDP socket.
+ */
+static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pcpu_sw_netstats *stats;
+ struct gtp_dev *gtp;
+ bool xnet;
+ int ret;
+
+ gtp = rcu_dereference_sk_user_data(sk);
+ if (!gtp)
+ return 1;
+
+ netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
+
+ xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+
+ switch (udp_sk(sk)->encap_type) {
+ case UDP_ENCAP_GTP0:
+ netdev_dbg(gtp->dev, "received GTP0 packet\n");
+ ret = gtp0_udp_encap_recv(gtp, skb, xnet);
+ break;
+ case UDP_ENCAP_GTP1U:
+ netdev_dbg(gtp->dev, "received GTP1U packet\n");
+ ret = gtp1u_udp_encap_recv(gtp, skb, xnet);
+ break;
+ default:
+ ret = -1; /* Shouldn't happen. */
+ }
+
+ switch (ret) {
+ case 1:
+ netdev_dbg(gtp->dev, "pass up to the process\n");
+ return 1;
+ case 0:
+ netdev_dbg(gtp->dev, "forwarding packet from GGSN to uplink\n");
+ break;
+ case -1:
+ netdev_dbg(gtp->dev, "GTP packet has been dropped\n");
+ kfree_skb(skb);
+ return 0;
+ }
+
+ /* Now that the UDP and the GTP header have been removed, set up the
+ * new network header. This is required by the upper layer to
+ * calculate the transport header.
+ */
+ skb_reset_network_header(skb);
+
+ skb->dev = gtp->dev;
+
+ stats = this_cpu_ptr(gtp->dev->tstats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ netif_rx(skb);
+
+ return 0;
+}
+
+static int gtp_dev_init(struct net_device *dev)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ gtp->dev = dev;
+
+ dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void gtp_dev_uninit(struct net_device *dev)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ gtp_encap_disable(gtp);
+ free_percpu(dev->tstats);
+}
+
+static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
+ const struct sock *sk, __be32 daddr)
+{
+ memset(fl4, 0, sizeof(*fl4));
+ fl4->flowi4_oif = sk->sk_bound_dev_if;
+ fl4->daddr = daddr;
+ fl4->saddr = inet_sk(sk)->inet_saddr;
+ fl4->flowi4_tos = RT_CONN_FLAGS(sk);
+ fl4->flowi4_proto = sk->sk_protocol;
+
+ return ip_route_output_key(net, fl4);
+}
+
+static inline void gtp0_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+{
+ int payload_len = skb->len;
+ struct gtp0_header *gtp0;
+
+ gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0));
+
+ gtp0->flags = 0x1e; /* v0, GTP-non-prime. */
+ gtp0->type = GTP_TPDU;
+ gtp0->length = htons(payload_len);
+ gtp0->seq = htons((atomic_inc_return(&pctx->tx_seq) - 1) % 0xffff);
+ gtp0->flow = htons(pctx->u.v0.flow);
+ gtp0->number = 0xff;
+ gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0xff;
+ gtp0->tid = cpu_to_be64(pctx->u.v0.tid);
+}
+
+static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
+{
+ int payload_len = skb->len;
+ struct gtp1_header *gtp1;
+
+ gtp1 = (struct gtp1_header *) skb_push(skb, sizeof(*gtp1));
+
+ /* Bits 8 7 6 5 4 3 2 1
+ * +--+--+--+--+--+--+--+--+
+ * |version |PT| 1| E| S|PN|
+ * +--+--+--+--+--+--+--+--+
+ * 0 0 1 1 1 0 0 0
+ */
+ gtp1->flags = 0x38; /* v1, GTP-non-prime. */
+ gtp1->type = GTP_TPDU;
+ gtp1->length = htons(payload_len);
+ gtp1->tid = htonl(pctx->u.v1.o_tei);
+
+ /* TODO: Support for extension header, sequence number and N-PDU.
+ * Update the length field if any of them is available.
+ */
+}
+
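/* Worked out from the two push helpers above (field sizes assumed from
 * include/net/gtp.h, which is not part of this hunk): GTPv0 adds a
 * 20-byte header (1+1+2+2+2+1+3+8 for flags, type, length, seq, flow,
 * number, spare[3] and the 64-bit TID), while GTPv1-U adds 8 bytes
 * (1+1+2+4), or 12 when any of the S/PN/E flags is present. These are
 * the amounts gtp_build_skb_ip4() subtracts from the path MTU on top of
 * the outer IP and UDP headers.
 */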
+struct gtp_pktinfo {
+ struct sock *sk;
+ struct iphdr *iph;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ struct pdp_ctx *pctx;
+ struct net_device *dev;
+ __be16 gtph_port;
+};
+
+static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
+{
+ switch (pktinfo->pctx->gtp_version) {
+ case GTP_V0:
+ pktinfo->gtph_port = htons(GTP0_PORT);
+ gtp0_push_header(skb, pktinfo->pctx);
+ break;
+ case GTP_V1:
+ pktinfo->gtph_port = htons(GTP1U_PORT);
+ gtp1_push_header(skb, pktinfo->pctx);
+ break;
+ }
+}
+
+static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
+ struct sock *sk, struct iphdr *iph,
+ struct pdp_ctx *pctx, struct rtable *rt,
+ struct flowi4 *fl4,
+ struct net_device *dev)
+{
+ pktinfo->sk = sk;
+ pktinfo->iph = iph;
+ pktinfo->pctx = pctx;
+ pktinfo->rt = rt;
+ pktinfo->fl4 = *fl4;
+ pktinfo->dev = dev;
+}
+
+static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+ struct gtp_pktinfo *pktinfo)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+ struct pdp_ctx *pctx;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ struct iphdr *iph;
+ struct sock *sk;
+ __be16 df;
+ int mtu;
+
+ /* Read the IP destination address and resolve the PDP context.
+ * Prepend PDP header with TEI/TID from PDP ctx.
+ */
+ iph = ip_hdr(skb);
+ pctx = ipv4_pdp_find(gtp, iph->daddr);
+ if (!pctx) {
+ netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
+ &iph->daddr);
+ return -ENOENT;
+ }
+ netdev_dbg(dev, "found PDP context %p\n", pctx);
+
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ if (gtp->sock0)
+ sk = gtp->sock0->sk;
+ else
+ sk = NULL;
+ break;
+ case GTP_V1:
+ if (gtp->sock1u)
+ sk = gtp->sock1u->sk;
+ else
+ sk = NULL;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ if (!sk) {
+ netdev_dbg(dev, "no userspace socket is available, skip\n");
+ return -ENOENT;
+ }
+
+ rt = ip4_route_output_gtp(sock_net(sk), &fl4, gtp->sock0->sk,
+ pctx->sgsn_addr_ip4.s_addr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to SSGN %pI4\n",
+ &pctx->sgsn_addr_ip4.s_addr);
+ dev->stats.tx_carrier_errors++;
+ goto err;
+ }
+
+ if (rt->dst.dev == dev) {
+ netdev_dbg(dev, "circular route to SSGN %pI4\n",
+ &pctx->sgsn_addr_ip4.s_addr);
+ dev->stats.collisions++;
+ goto err_rt;
+ }
+
+ skb_dst_drop(skb);
+
+ /* This is similar to tnl_update_pmtu(). */
+ df = iph->frag_off;
+ if (df) {
+ mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
+ sizeof(struct iphdr) - sizeof(struct udphdr);
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ mtu -= sizeof(struct gtp0_header);
+ break;
+ case GTP_V1:
+ mtu -= sizeof(struct gtp1_header);
+ break;
+ }
+ } else {
+ mtu = dst_mtu(&rt->dst);
+ }
+
+ rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu);
+
+ if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
+ mtu < ntohs(iph->tot_len)) {
+ netdev_dbg(dev, "packet too big, fragmentation needed\n");
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
+ goto err_rt;
+ }
+
+ gtp_set_pktinfo_ipv4(pktinfo, sk, iph, pctx, rt, &fl4, dev);
+ gtp_push_header(skb, pktinfo);
+
+ return 0;
+err_rt:
+ ip_rt_put(rt);
+err:
+ return -EBADMSG;
+}
+
+static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int proto = ntohs(skb->protocol);
+ struct gtp_pktinfo pktinfo;
+ int err;
+
+ /* Ensure there is sufficient headroom. */
+ if (skb_cow_head(skb, dev->needed_headroom))
+ goto tx_err;
+
+ skb_reset_inner_headers(skb);
+
+ /* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
+ rcu_read_lock();
+ switch (proto) {
+ case ETH_P_IP:
+ err = gtp_build_skb_ip4(skb, dev, &pktinfo);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ rcu_read_unlock();
+
+ if (err < 0)
+ goto tx_err;
+
+ switch (proto) {
+ case ETH_P_IP:
+ netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
+ &pktinfo.iph->saddr, &pktinfo.iph->daddr);
+ udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
+ pktinfo.fl4.saddr, pktinfo.fl4.daddr,
+ pktinfo.iph->tos,
+ ip4_dst_hoplimit(&pktinfo.rt->dst),
+ htons(IP_DF),
+ pktinfo.gtph_port, pktinfo.gtph_port,
+ true, false);
+ break;
+ }
+
+ return NETDEV_TX_OK;
+tx_err:
+ dev->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops gtp_netdev_ops = {
+ .ndo_init = gtp_dev_init,
+ .ndo_uninit = gtp_dev_uninit,
+ .ndo_start_xmit = gtp_dev_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+};
+
+static void gtp_link_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &gtp_netdev_ops;
+ dev->destructor = free_netdev;
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+
+ /* Zero header length. */
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->features |= NETIF_F_LLTX;
+ netif_keep_dst(dev);
+
+ /* Assume largest header, i.e. GTPv0. */
+ dev->needed_headroom = LL_MAX_HEADER +
+ sizeof(struct iphdr) +
+ sizeof(struct udphdr) +
+ sizeof(struct gtp0_header);
+}
+
+static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
+static void gtp_hashtable_free(struct gtp_dev *gtp);
+static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+ int fd_gtp0, int fd_gtp1, struct net *src_net);
+
+static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int hashsize, err, fd0, fd1;
+ struct gtp_dev *gtp;
+ struct gtp_net *gn;
+
+ if (!data[IFLA_GTP_FD0] || !data[IFLA_GTP_FD1])
+ return -EINVAL;
+
+ gtp = netdev_priv(dev);
+
+ fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
+ fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
+
+ err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+ if (err < 0)
+ goto out_err;
+
+ if (!data[IFLA_GTP_PDP_HASHSIZE])
+ hashsize = 1024;
+ else
+ hashsize = nla_get_u32(data[IFLA_GTP_PDP_HASHSIZE]);
+
+ err = gtp_hashtable_new(gtp, hashsize);
+ if (err < 0)
+ goto out_encap;
+
+ err = register_netdevice(dev);
+ if (err < 0) {
+ netdev_dbg(dev, "failed to register new netdev %d\n", err);
+ goto out_hashtable;
+ }
+
+ gn = net_generic(dev_net(dev), gtp_net_id);
+ list_add_rcu(&gtp->list, &gn->gtp_dev_list);
+
+ netdev_dbg(dev, "registered new GTP interface\n");
+
+ return 0;
+
+out_hashtable:
+ gtp_hashtable_free(gtp);
+out_encap:
+ gtp_encap_disable(gtp);
+out_err:
+ return err;
+}
+
+static void gtp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ gtp_encap_disable(gtp);
+ gtp_hashtable_free(gtp);
+ list_del_rcu(&gtp->list);
+ unregister_netdevice_queue(dev, head);
+}
+
+static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
+ [IFLA_GTP_FD0] = { .type = NLA_U32 },
+ [IFLA_GTP_FD1] = { .type = NLA_U32 },
+ [IFLA_GTP_PDP_HASHSIZE] = { .type = NLA_U32 },
+};
+
+static int gtp_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (!data)
+ return -EINVAL;
+
+ return 0;
+}
+
+static size_t gtp_get_size(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(__u32)); /* IFLA_GTP_PDP_HASHSIZE */
+}
+
+static int gtp_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+
+ if (nla_put_u32(skb, IFLA_GTP_PDP_HASHSIZE, gtp->hash_size))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops gtp_link_ops __read_mostly = {
+ .kind = "gtp",
+ .maxtype = IFLA_GTP_MAX,
+ .policy = gtp_policy,
+ .priv_size = sizeof(struct gtp_dev),
+ .setup = gtp_link_setup,
+ .validate = gtp_validate,
+ .newlink = gtp_newlink,
+ .dellink = gtp_dellink,
+ .get_size = gtp_get_size,
+ .fill_info = gtp_fill_info,
+};
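+
+/* Userspace sketch (illustrative comment, not driver code): gtp_newlink()
+ * requires two already-open UDP sockets, passed as IFLA_GTP_FD0 and
+ * IFLA_GTP_FD1 in the RTM_NEWLINK request. Assuming the well-known GTP
+ * ports -- 3386 for GTPv0 and 2152 for GTPv1-U -- a control-plane daemon
+ * could prepare them roughly like this:
+ *
+ * int fd0 = socket(AF_INET, SOCK_DGRAM, 0);
+ * int fd1 = socket(AF_INET, SOCK_DGRAM, 0);
+ * struct sockaddr_in sin = {
+ * .sin_family = AF_INET,
+ * .sin_port = htons(3386),
+ * };
+ *
+ * bind(fd0, (struct sockaddr *)&sin, sizeof(sin));
+ * sin.sin_port = htons(2152);
+ * bind(fd1, (struct sockaddr *)&sin, sizeof(sin));
+ */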
+
+static struct net *gtp_genl_get_net(struct net *src_net, struct nlattr *tb[])
+{
+ struct net *net;
+
+ /* Examine the link attributes and figure out which network namespace
+ * we are talking about.
+ */
+ if (tb[GTPA_NET_NS_FD])
+ net = get_net_ns_by_fd(nla_get_u32(tb[GTPA_NET_NS_FD]));
+ else
+ net = get_net(src_net);
+
+ return net;
+}
+
+static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize)
+{
+ int i;
+
+ gtp->addr_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+ if (gtp->addr_hash == NULL)
+ return -ENOMEM;
+
+ gtp->tid_hash = kmalloc(sizeof(struct hlist_head) * hsize, GFP_KERNEL);
+ if (gtp->tid_hash == NULL)
+ goto err1;
+
+ gtp->hash_size = hsize;
+
+ for (i = 0; i < hsize; i++) {
+ INIT_HLIST_HEAD(&gtp->addr_hash[i]);
+ INIT_HLIST_HEAD(&gtp->tid_hash[i]);
+ }
+ return 0;
+err1:
+ kfree(gtp->addr_hash);
+ return -ENOMEM;
+}
+
+static void gtp_hashtable_free(struct gtp_dev *gtp)
+{
+ struct pdp_ctx *pctx;
+ int i;
+
+ for (i = 0; i < gtp->hash_size; i++) {
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
+ hlist_del_rcu(&pctx->hlist_tid);
+ hlist_del_rcu(&pctx->hlist_addr);
+ kfree_rcu(pctx, rcu_head);
+ }
+ }
+ synchronize_rcu();
+ kfree(gtp->addr_hash);
+ kfree(gtp->tid_hash);
+}
+
+static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
+ int fd_gtp0, int fd_gtp1, struct net *src_net)
+{
+ struct udp_tunnel_sock_cfg tuncfg = {NULL};
+ struct socket *sock0, *sock1u;
+ int err;
+
+ netdev_dbg(dev, "enable gtp on %d, %d\n", fd_gtp0, fd_gtp1);
+
+ sock0 = sockfd_lookup(fd_gtp0, &err);
+ if (sock0 == NULL) {
+ netdev_dbg(dev, "socket fd=%d not found (gtp0)\n", fd_gtp0);
+ return -ENOENT;
+ }
+
+ if (sock0->sk->sk_protocol != IPPROTO_UDP) {
+ netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp0);
+ err = -EINVAL;
+ goto err1;
+ }
+
+ sock1u = sockfd_lookup(fd_gtp1, &err);
+ if (sock1u == NULL) {
+ netdev_dbg(dev, "socket fd=%d not found (gtp1u)\n", fd_gtp1);
+ err = -ENOENT;
+ goto err1;
+ }
+
+ if (sock1u->sk->sk_protocol != IPPROTO_UDP) {
+ netdev_dbg(dev, "socket fd=%d not UDP\n", fd_gtp1);
+ err = -EINVAL;
+ goto err2;
+ }
+
+ netdev_dbg(dev, "enable gtp on %p, %p\n", sock0, sock1u);
+
+ gtp->sock0 = sock0;
+ gtp->sock1u = sock1u;
+ gtp->net = src_net;
+
+ tuncfg.sk_user_data = gtp;
+ tuncfg.encap_rcv = gtp_encap_recv;
+ tuncfg.encap_destroy = gtp_encap_destroy;
+
+ tuncfg.encap_type = UDP_ENCAP_GTP0;
+ setup_udp_tunnel_sock(sock_net(gtp->sock0->sk), gtp->sock0, &tuncfg);
+
+ tuncfg.encap_type = UDP_ENCAP_GTP1U;
+ setup_udp_tunnel_sock(sock_net(gtp->sock1u->sk), gtp->sock1u, &tuncfg);
+
+ err = 0;
+err2:
+ sockfd_put(sock1u);
+err1:
+ sockfd_put(sock0);
+ return err;
+}
+
+static struct net_device *gtp_find_dev(struct net *net, int ifindex)
+{
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp;
+
+ list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+ if (ifindex == gtp->dev->ifindex)
+ return gtp->dev;
+ }
+ return NULL;
+}
+
+static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
+{
+ pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
+ pctx->af = AF_INET;
+ pctx->sgsn_addr_ip4.s_addr =
+ nla_get_be32(info->attrs[GTPA_SGSN_ADDRESS]);
+ pctx->ms_addr_ip4.s_addr =
+ nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ /* According to TS 09.60, sections 7.5.1 and 7.5.2, the flow
+ * label needs to be the same for uplink and downlink packets,
+ * so let's annotate this.
+ */
+ pctx->u.v0.tid = nla_get_u64(info->attrs[GTPA_TID]);
+ pctx->u.v0.flow = nla_get_u16(info->attrs[GTPA_FLOW]);
+ break;
+ case GTP_V1:
+ pctx->u.v1.i_tei = nla_get_u32(info->attrs[GTPA_I_TEI]);
+ pctx->u.v1.o_tei = nla_get_u32(info->attrs[GTPA_O_TEI]);
+ break;
+ default:
+ break;
+ }
+}
+
+static int ipv4_pdp_add(struct net_device *dev, struct genl_info *info)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+ u32 hash_ms, hash_tid = 0;
+ struct pdp_ctx *pctx;
+ bool found = false;
+ __be32 ms_addr;
+
+ ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+ hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+
+ hlist_for_each_entry_rcu(pctx, &gtp->addr_hash[hash_ms], hlist_addr) {
+ if (pctx->ms_addr_ip4.s_addr == ms_addr) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+ if (info->nlhdr->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+
+ ipv4_pdp_fill(pctx, info);
+
+ if (pctx->gtp_version == GTP_V0)
+ netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
+ pctx->u.v0.tid, pctx);
+ else if (pctx->gtp_version == GTP_V1)
+ netdev_dbg(dev, "GTPv1-U: update tunnel id = %x/%x (pdp %p)\n",
+ pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+
+ return 0;
+ }
+
+ pctx = kmalloc(sizeof(struct pdp_ctx), GFP_KERNEL);
+ if (pctx == NULL)
+ return -ENOMEM;
+
+ ipv4_pdp_fill(pctx, info);
+ atomic_set(&pctx->tx_seq, 0);
+
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ /* TS 09.60: "The flow label identifies unambiguously a GTP
+ * flow.". We use the tid for this instead; I cannot find a
+ * situation in which this doesn't unambiguously identify the
+ * PDP context.
+ */
+ hash_tid = gtp0_hashfn(pctx->u.v0.tid) % gtp->hash_size;
+ break;
+ case GTP_V1:
+ hash_tid = gtp1u_hashfn(pctx->u.v1.i_tei) % gtp->hash_size;
+ break;
+ }
+
+ hlist_add_head_rcu(&pctx->hlist_addr, &gtp->addr_hash[hash_ms]);
+ hlist_add_head_rcu(&pctx->hlist_tid, &gtp->tid_hash[hash_tid]);
+
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
+ pctx->u.v0.tid, &pctx->sgsn_addr_ip4,
+ &pctx->ms_addr_ip4, pctx);
+ break;
+ case GTP_V1:
+ netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
+ pctx->u.v1.i_tei, pctx->u.v1.o_tei,
+ &pctx->sgsn_addr_ip4, &pctx->ms_addr_ip4, pctx);
+ break;
+ }
+
+ return 0;
+}
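+
+/* Design note (illustrative): each pdp_ctx is linked into two tables above so
+ * that both datapath directions resolve it with one bucket walk: addr_hash,
+ * keyed on the mobile subscriber address, serves the transmit lookup in
+ * gtp_build_skb_ip4(), while tid_hash, keyed on the tunnel id, serves the
+ * receive demultiplexing. Schematically:
+ *
+ * TX: bucket = addr_hash[ipv4_hashfn(iph->daddr) % hash_size];
+ * RX: bucket = tid_hash[gtp0_hashfn(tid) % hash_size];
+ *
+ * followed in both cases by an hlist_for_each_entry_rcu() match on the key.
+ */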
+
+static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *dev;
+ struct net *net;
+
+ if (!info->attrs[GTPA_VERSION] ||
+ !info->attrs[GTPA_LINK] ||
+ !info->attrs[GTPA_SGSN_ADDRESS] ||
+ !info->attrs[GTPA_MS_ADDRESS])
+ return -EINVAL;
+
+ switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+ case GTP_V0:
+ if (!info->attrs[GTPA_TID] ||
+ !info->attrs[GTPA_FLOW])
+ return -EINVAL;
+ break;
+ case GTP_V1:
+ if (!info->attrs[GTPA_I_TEI] ||
+ !info->attrs[GTPA_O_TEI])
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+
+ /* Check if there's an existing gtpX device to configure */
+ dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+ if (dev == NULL) {
+ put_net(net);
+ return -ENODEV;
+ }
+ put_net(net);
+
+ return ipv4_pdp_add(dev, info);
+}
+
+static int gtp_genl_del_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+ struct net_device *dev;
+ struct pdp_ctx *pctx;
+ struct gtp_dev *gtp;
+ struct net *net;
+
+ if (!info->attrs[GTPA_VERSION] ||
+ !info->attrs[GTPA_LINK])
+ return -EINVAL;
+
+ net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+
+ /* Check if there's an existing gtpX device to configure */
+ dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+ if (dev == NULL) {
+ put_net(net);
+ return -ENODEV;
+ }
+ put_net(net);
+
+ gtp = netdev_priv(dev);
+
+ switch (nla_get_u32(info->attrs[GTPA_VERSION])) {
+ case GTP_V0:
+ if (!info->attrs[GTPA_TID])
+ return -EINVAL;
+ pctx = gtp0_pdp_find(gtp, nla_get_u64(info->attrs[GTPA_TID]));
+ break;
+ case GTP_V1:
+ if (!info->attrs[GTPA_I_TEI])
+ return -EINVAL;
+ pctx = gtp1_pdp_find(gtp, nla_get_u32(info->attrs[GTPA_I_TEI]));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (pctx == NULL)
+ return -ENOENT;
+
+ if (pctx->gtp_version == GTP_V0)
+ netdev_dbg(dev, "GTPv0-U: deleting tunnel id = %llx (pdp %p)\n",
+ pctx->u.v0.tid, pctx);
+ else if (pctx->gtp_version == GTP_V1)
+ netdev_dbg(dev, "GTPv1-U: deleting tunnel id = %x/%x (pdp %p)\n",
+ pctx->u.v1.i_tei, pctx->u.v1.o_tei, pctx);
+
+ hlist_del_rcu(&pctx->hlist_tid);
+ hlist_del_rcu(&pctx->hlist_addr);
+ kfree_rcu(pctx, rcu_head);
+
+ return 0;
+}
+
+static struct genl_family gtp_genl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = "gtp",
+ .version = 0,
+ .hdrsize = 0,
+ .maxattr = GTPA_MAX,
+ .netnsok = true,
+};
+
+static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
+ u32 type, struct pdp_ctx *pctx)
+{
+ void *genlh;
+
+ genlh = genlmsg_put(skb, snd_portid, snd_seq, &gtp_genl_family, 0,
+ type);
+ if (genlh == NULL)
+ goto nlmsg_failure;
+
+ if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
+ nla_put_be32(skb, GTPA_SGSN_ADDRESS, pctx->sgsn_addr_ip4.s_addr) ||
+ nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
+ goto nla_put_failure;
+
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
+ nla_put_u16(skb, GTPA_FLOW, pctx->u.v0.flow))
+ goto nla_put_failure;
+ break;
+ case GTP_V1:
+ if (nla_put_u32(skb, GTPA_I_TEI, pctx->u.v1.i_tei) ||
+ nla_put_u32(skb, GTPA_O_TEI, pctx->u.v1.o_tei))
+ goto nla_put_failure;
+ break;
+ }
+ genlmsg_end(skb, genlh);
+ return 0;
+
+nlmsg_failure:
+nla_put_failure:
+ genlmsg_cancel(skb, genlh);
+ return -EMSGSIZE;
+}
+
+static int gtp_genl_get_pdp(struct sk_buff *skb, struct genl_info *info)
+{
+ struct pdp_ctx *pctx = NULL;
+ struct net_device *dev;
+ struct sk_buff *skb2;
+ struct gtp_dev *gtp;
+ u32 gtp_version;
+ struct net *net;
+ int err;
+
+ if (!info->attrs[GTPA_VERSION] ||
+ !info->attrs[GTPA_LINK])
+ return -EINVAL;
+
+ gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
+ switch (gtp_version) {
+ case GTP_V0:
+ case GTP_V1:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ net = gtp_genl_get_net(sock_net(skb->sk), info->attrs);
+ if (IS_ERR(net))
+ return PTR_ERR(net);
+
+ /* Check if there's an existing gtpX device to configure */
+ dev = gtp_find_dev(net, nla_get_u32(info->attrs[GTPA_LINK]));
+ if (dev == NULL) {
+ put_net(net);
+ return -ENODEV;
+ }
+ put_net(net);
+
+ gtp = netdev_priv(dev);
+
+ rcu_read_lock();
+ if (gtp_version == GTP_V0 &&
+ info->attrs[GTPA_TID]) {
+ u64 tid = nla_get_u64(info->attrs[GTPA_TID]);
+
+ pctx = gtp0_pdp_find(gtp, tid);
+ } else if (gtp_version == GTP_V1 &&
+ info->attrs[GTPA_I_TEI]) {
+ u32 tid = nla_get_u32(info->attrs[GTPA_I_TEI]);
+
+ pctx = gtp1_pdp_find(gtp, tid);
+ } else if (info->attrs[GTPA_MS_ADDRESS]) {
+ __be32 ip = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+
+ pctx = ipv4_pdp_find(gtp, ip);
+ }
+
+ if (pctx == NULL) {
+ err = -ENOENT;
+ goto err_unlock;
+ }
+
+ skb2 = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (skb2 == NULL) {
+ err = -ENOMEM;
+ goto err_unlock;
+ }
+
+ err = gtp_genl_fill_info(skb2, NETLINK_CB(skb).portid,
+ info->snd_seq, info->nlhdr->nlmsg_type, pctx);
+ if (err < 0)
+ goto err_unlock_free;
+
+ rcu_read_unlock();
+ return genlmsg_unicast(genl_info_net(info), skb2, info->snd_portid);
+
+err_unlock_free:
+ kfree_skb(skb2);
+err_unlock:
+ rcu_read_unlock();
+ return err;
+}
+
+static int gtp_genl_dump_pdp(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+ struct net *net = sock_net(skb->sk);
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ unsigned long tid = cb->args[1];
+ int i, k = cb->args[0], ret;
+ struct pdp_ctx *pctx;
+
+ if (cb->args[4])
+ return 0;
+
+ list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
+ if (last_gtp && last_gtp != gtp)
+ continue;
+ else
+ last_gtp = NULL;
+
+ for (i = k; i < gtp->hash_size; i++) {
+ hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid) {
+ if (tid && tid != pctx->u.tid)
+ continue;
+ else
+ tid = 0;
+
+ ret = gtp_genl_fill_info(skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ cb->nlh->nlmsg_type, pctx);
+ if (ret < 0) {
+ cb->args[0] = i;
+ cb->args[1] = pctx->u.tid;
+ cb->args[2] = (unsigned long)gtp;
+ goto out;
+ }
+ }
+ }
+ }
+ cb->args[4] = 1;
+out:
+ return skb->len;
+}
+
+static struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
+ [GTPA_LINK] = { .type = NLA_U32, },
+ [GTPA_VERSION] = { .type = NLA_U32, },
+ [GTPA_TID] = { .type = NLA_U64, },
+ [GTPA_SGSN_ADDRESS] = { .type = NLA_U32, },
+ [GTPA_MS_ADDRESS] = { .type = NLA_U32, },
+ [GTPA_FLOW] = { .type = NLA_U16, },
+ [GTPA_NET_NS_FD] = { .type = NLA_U32, },
+ [GTPA_I_TEI] = { .type = NLA_U32, },
+ [GTPA_O_TEI] = { .type = NLA_U32, },
+};
+
+static const struct genl_ops gtp_genl_ops[] = {
+ {
+ .cmd = GTP_CMD_NEWPDP,
+ .doit = gtp_genl_new_pdp,
+ .policy = gtp_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = GTP_CMD_DELPDP,
+ .doit = gtp_genl_del_pdp,
+ .policy = gtp_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = GTP_CMD_GETPDP,
+ .doit = gtp_genl_get_pdp,
+ .dumpit = gtp_genl_dump_pdp,
+ .policy = gtp_genl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
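+
+/* Message layout sketch (illustrative): a well-formed GTP_CMD_NEWPDP request,
+ * as validated by gtp_genl_new_pdp() above, carries
+ *
+ * GTPA_VERSION, GTPA_LINK, GTPA_SGSN_ADDRESS, GTPA_MS_ADDRESS,
+ * plus GTPA_TID and GTPA_FLOW for GTP_V0,
+ * or GTPA_I_TEI and GTPA_O_TEI for GTP_V1,
+ *
+ * with GTPA_NET_NS_FD optionally selecting the namespace holding the device.
+ */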
+
+static int __net_init gtp_net_init(struct net *net)
+{
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+
+ INIT_LIST_HEAD(&gn->gtp_dev_list);
+ return 0;
+}
+
+static void __net_exit gtp_net_exit(struct net *net)
+{
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
+static struct pernet_operations gtp_net_ops = {
+ .init = gtp_net_init,
+ .exit = gtp_net_exit,
+ .id = &gtp_net_id,
+ .size = sizeof(struct gtp_net),
+};
+
+static int __init gtp_init(void)
+{
+ int err;
+
+ get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
+
+ err = rtnl_link_register(&gtp_link_ops);
+ if (err < 0)
+ goto error_out;
+
+ err = genl_register_family_with_ops(&gtp_genl_family, gtp_genl_ops);
+ if (err < 0)
+ goto unreg_rtnl_link;
+
+ err = register_pernet_subsys(&gtp_net_ops);
+ if (err < 0)
+ goto unreg_genl_family;
+
+ pr_info("GTP module loaded (pdp ctx size %Zd bytes)\n",
+ sizeof(struct pdp_ctx));
+ return 0;
+
+unreg_genl_family:
+ genl_unregister_family(&gtp_genl_family);
+unreg_rtnl_link:
+ rtnl_link_unregister(&gtp_link_ops);
+error_out:
+ pr_err("error loading GTP module loaded\n");
+ return err;
+}
+late_initcall(gtp_init);
+
+static void __exit gtp_fini(void)
+{
+ unregister_pernet_subsys(&gtp_net_ops);
+ genl_unregister_family(&gtp_genl_family);
+ rtnl_link_unregister(&gtp_link_ops);
+
+ pr_info("GTP module unloaded\n");
+}
+module_exit(gtp_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
+MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("gtp");
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 72c9f1f35..78dbc4454 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -635,10 +635,10 @@ static int receive(struct net_device *dev, int cnt)
#ifdef __i386__
#include <asm/msr.h>
-#define GETTICK(x) \
-({ \
- if (cpu_has_tsc) \
- x = (unsigned int)rdtsc(); \
+#define GETTICK(x) \
+({ \
+ if (boot_cpu_has(X86_FEATURE_TSC)) \
+ x = (unsigned int)rdtsc(); \
})
#else /* __i386__ */
#define GETTICK(x)
@@ -780,8 +780,10 @@ static int baycom_send_packet(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- if (bc->skb)
- return NETDEV_TX_LOCKED;
+ if (bc->skb) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
/* strip KISS byte */
if (skb->len >= HDLCDRV_MAXFLEN+1 || skb->len < 3) {
dev_kfree_skb(skb);
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 49fe59b18..4bad0b894 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -412,8 +412,10 @@ static netdev_tx_t hdlcdrv_send_packet(struct sk_buff *skb,
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
- if (sm->skb)
- return NETDEV_TX_LOCKED;
+ if (sm->skb) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
netif_stop_queue(dev);
sm->skb = skb;
return NETDEV_TX_OK;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 85828f153..1dfe2304d 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -519,7 +519,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
dev->stats.tx_packets++;
dev->stats.tx_bytes += actual;
- ax->dev->trans_start = jiffies;
+ netif_trans_update(ax->dev);
ax->xleft = count - actual;
ax->xhead = ax->xbuff + actual;
}
@@ -542,7 +542,7 @@ static netdev_tx_t ax_xmit(struct sk_buff *skb, struct net_device *dev)
* May be we must check transmitter timeout here ?
* 14 Oct 1994 Dmitry Gorodchanin.
*/
- if (time_before(jiffies, dev->trans_start + 20 * HZ)) {
+ if (time_before(jiffies, dev_trans_start(dev) + 20 * HZ)) {
/* 20 sec timeout not reached */
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index ce88df33f..b8083161e 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1669,7 +1669,7 @@ static netdev_tx_t scc_net_tx(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb(skb_del);
}
skb_queue_tail(&scc->tx_queue, skb);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/*
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1c1d5e105..b1f395e2f 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -601,7 +601,7 @@ static netdev_tx_t yam_send_packet(struct sk_buff *skb,
return ax25_ip_xmit(skb);
skb_queue_tail(&yp->send_queue, skb);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 8b3bd8ecd..c270c5a54 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -158,7 +158,7 @@ enum rndis_device_state {
};
struct rndis_device {
- struct netvsc_device *net_dev;
+ struct net_device *ndev;
enum rndis_device_state state;
bool link_state;
@@ -202,6 +202,8 @@ int rndis_filter_receive(struct hv_device *dev,
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
+void netvsc_switch_datapath(struct net_device *nv_dev, bool vf);
+
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
#define NVSP_PROTOCOL_VERSION_1 2
@@ -641,10 +643,18 @@ struct netvsc_reconfig {
u32 event;
};
+struct garp_wrk {
+ struct work_struct dwrk;
+ struct net_device *netdev;
+ struct netvsc_device *netvsc_dev;
+};
+
/* The context of the netvsc device */
struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
+ /* netvsc_device */
+ struct netvsc_device *nvdev;
/* reconfigure work */
struct delayed_work dwork;
/* last reconfig time */
@@ -656,6 +666,7 @@ struct net_device_context {
struct work_struct work;
u32 msg_enable; /* debug level */
+ struct garp_wrk gwrk;
struct netvsc_stats __percpu *tx_stats;
struct netvsc_stats __percpu *rx_stats;
@@ -663,17 +674,17 @@ struct net_device_context {
/* Ethtool settings */
u8 duplex;
u32 speed;
+
+ /* the device is going away */
+ bool start_remove;
};
/* Per netvsc device */
struct netvsc_device {
- struct hv_device *dev;
-
u32 nvsp_version;
atomic_t num_outstanding_sends;
wait_queue_head_t wait_drain;
- bool start_remove;
bool destroy;
/* Receive buffer allocated by us but manages by NetVSP */
@@ -699,8 +710,6 @@ struct netvsc_device {
struct nvsp_message revoke_packet;
/* unsigned char HwMacAddr[HW_MACADDR_LEN]; */
- struct net_device *ndev;
-
struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
@@ -723,13 +732,15 @@ struct netvsc_device {
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
- /* The net device context */
- struct net_device_context *nd_ctx;
-
/* 1: allocated, serial number is valid. 0: not allocated */
u32 vf_alloc;
/* Serial number of the VF to team with */
u32 vf_serial;
+ atomic_t open_cnt;
+ /* State to manage the associated VF interface. */
+ bool vf_inject;
+ struct net_device *vf_netdev;
+ atomic_t vf_use_cnt;
};
/* NdisInitialize message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index ec313fc08..719cb3578 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -33,11 +33,36 @@
#include "hyperv_net.h"
+/*
+ * Switch the data path from the synthetic interface to the VF
+ * interface.
+ */
+void netvsc_switch_datapath(struct net_device *ndev, bool vf)
+{
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct hv_device *dev = net_device_ctx->device_ctx;
+ struct netvsc_device *nv_dev = net_device_ctx->nvdev;
+ struct nvsp_message *init_pkt = &nv_dev->channel_init_pkt;
+
+ memset(init_pkt, 0, sizeof(struct nvsp_message));
+ init_pkt->hdr.msg_type = NVSP_MSG4_TYPE_SWITCH_DATA_PATH;
+ if (vf)
+ init_pkt->msg.v4_msg.active_dp.active_datapath =
+ NVSP_DATAPATH_VF;
+ else
+ init_pkt->msg.v4_msg.active_dp.active_datapath =
+ NVSP_DATAPATH_SYNTHETIC;
+
+ vmbus_sendpacket(dev->channel, init_pkt,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_pkt,
+ VM_PKT_DATA_INBAND, 0);
+}
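+
+/* Usage note (illustrative): the VF notifier handlers added to netvsc_drv.c
+ * later in this patch drive the switch in both directions, e.g.
+ *
+ * netvsc_switch_datapath(ndev, true); from netvsc_vf_up(): data path to VF
+ * netvsc_switch_datapath(ndev, false); from netvsc_vf_down(): back to synthetic
+ */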
+
-static struct netvsc_device *alloc_net_device(struct hv_device *device)
+static struct netvsc_device *alloc_net_device(void)
{
struct netvsc_device *net_device;
- struct net_device *ndev = hv_get_drvdata(device);
net_device = kzalloc(sizeof(struct netvsc_device), GFP_KERNEL);
if (!net_device)
@@ -50,14 +75,15 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
}
init_waitqueue_head(&net_device->wait_drain);
- net_device->start_remove = false;
net_device->destroy = false;
- net_device->dev = device;
- net_device->ndev = ndev;
+ atomic_set(&net_device->open_cnt, 0);
+ atomic_set(&net_device->vf_use_cnt, 0);
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
- hv_set_drvdata(device, net_device);
+ net_device->vf_netdev = NULL;
+ net_device->vf_inject = false;
+
return net_device;
}
@@ -69,9 +95,10 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
- struct netvsc_device *net_device;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
- net_device = hv_get_drvdata(device);
if (net_device && net_device->destroy)
net_device = NULL;
@@ -80,9 +107,9 @@ static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
- struct netvsc_device *net_device;
-
- net_device = hv_get_drvdata(device);
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
if (!net_device)
goto get_in_err;
@@ -96,11 +123,13 @@ get_in_err:
}
-static int netvsc_destroy_buf(struct netvsc_device *net_device)
+static int netvsc_destroy_buf(struct hv_device *device)
{
struct nvsp_message *revoke_packet;
int ret = 0;
- struct net_device *ndev = net_device->ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
/*
* If we got a section count, it means we received a
@@ -118,7 +147,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
revoke_packet->msg.v1_msg.
revoke_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
- ret = vmbus_sendpacket(net_device->dev->channel,
+ ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
@@ -136,8 +165,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
/* Teardown the gpadl on the vsp end */
if (net_device->recv_buf_gpadl_handle) {
- ret = vmbus_teardown_gpadl(net_device->dev->channel,
- net_device->recv_buf_gpadl_handle);
+ ret = vmbus_teardown_gpadl(device->channel,
+ net_device->recv_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@@ -178,7 +207,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
revoke_packet->msg.v1_msg.revoke_send_buf.id =
NETVSC_SEND_BUFFER_ID;
- ret = vmbus_sendpacket(net_device->dev->channel,
+ ret = vmbus_sendpacket(device->channel,
revoke_packet,
sizeof(struct nvsp_message),
(unsigned long)revoke_packet,
@@ -194,7 +223,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
}
/* Teardown the gpadl on the vsp end */
if (net_device->send_buf_gpadl_handle) {
- ret = vmbus_teardown_gpadl(net_device->dev->channel,
+ ret = vmbus_teardown_gpadl(device->channel,
net_device->send_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
@@ -229,7 +258,7 @@ static int netvsc_init_buf(struct hv_device *device)
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
- ndev = net_device->ndev;
+ ndev = hv_get_drvdata(device);
node = cpu_to_node(device->channel->target_cpu);
net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
@@ -406,7 +435,7 @@ static int netvsc_init_buf(struct hv_device *device)
goto exit;
cleanup:
- netvsc_destroy_buf(net_device);
+ netvsc_destroy_buf(device);
exit:
return ret;
@@ -419,6 +448,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
struct nvsp_message *init_packet,
u32 nvsp_ver)
{
+ struct net_device *ndev = hv_get_drvdata(device);
int ret;
unsigned long t;
@@ -452,8 +482,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
/* NVSPv2 or later: Send NDIS config */
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
- init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu +
- ETH_HLEN;
+ init_packet->msg.v2_msg.send_ndis_config.mtu = ndev->mtu + ETH_HLEN;
init_packet->msg.v2_msg.send_ndis_config.capability.ieee8021q = 1;
if (nvsp_ver >= NVSP_PROTOCOL_VERSION_5)
@@ -473,7 +502,6 @@ static int netvsc_connect_vsp(struct hv_device *device)
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
int ndis_version;
- struct net_device *ndev;
u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
int i, num_ver = 4; /* number of different NVSP versions */
@@ -481,7 +509,6 @@ static int netvsc_connect_vsp(struct hv_device *device)
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
- ndev = net_device->ndev;
init_packet = &net_device->channel_init_pkt;
@@ -537,9 +564,9 @@ cleanup:
return ret;
}
-static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
+static void netvsc_disconnect_vsp(struct hv_device *device)
{
- netvsc_destroy_buf(net_device);
+ netvsc_destroy_buf(device);
}
/*
@@ -547,24 +574,13 @@ static void netvsc_disconnect_vsp(struct netvsc_device *net_device)
*/
int netvsc_device_remove(struct hv_device *device)
{
- struct netvsc_device *net_device;
- unsigned long flags;
-
- net_device = hv_get_drvdata(device);
-
- netvsc_disconnect_vsp(net_device);
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
- /*
- * Since we have already drained, we don't need to busy wait
- * as was done in final_release_stor_device()
- * Note that we cannot set the ext pointer to NULL until
- * we have drained - to drain the outgoing packets, we need to
- * allow incoming packets.
- */
+ netvsc_disconnect_vsp(device);
- spin_lock_irqsave(&device->channel->inbound_lock, flags);
- hv_set_drvdata(device, NULL);
- spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
+ net_device_ctx->nvdev = NULL;
/*
* At this point, no one should be accessing net_device
@@ -612,12 +628,11 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
{
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
- struct net_device *ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
u32 send_index;
struct sk_buff *skb;
- ndev = net_device->ndev;
-
nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
(packet->offset8 << 3));
@@ -662,7 +677,7 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
wake_up(&net_device->wait_drain);
if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
- !net_device->start_remove &&
+ !net_device_ctx->start_remove &&
(hv_ringbuf_avail_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1))
netif_tx_wake_queue(netdev_get_tx_queue(
@@ -746,6 +761,7 @@ static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
}
static inline int netvsc_send_pkt(
+ struct hv_device *device,
struct hv_netvsc_packet *packet,
struct netvsc_device *net_device,
struct hv_page_buffer **pb,
@@ -754,7 +770,7 @@ static inline int netvsc_send_pkt(
struct nvsp_message nvmsg;
u16 q_idx = packet->q_idx;
struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
- struct net_device *ndev = net_device->ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
u64 req_id;
int ret;
struct hv_page_buffer *pgbuf;
@@ -949,7 +965,8 @@ int netvsc_send(struct hv_device *device,
}
if (msd_send) {
- m_ret = netvsc_send_pkt(msd_send, net_device, NULL, msd_skb);
+ m_ret = netvsc_send_pkt(device, msd_send, net_device,
+ NULL, msd_skb);
if (m_ret != 0) {
netvsc_free_send_slot(net_device,
@@ -960,7 +977,7 @@ int netvsc_send(struct hv_device *device,
send_now:
if (cur_send)
- ret = netvsc_send_pkt(cur_send, net_device, pb, skb);
+ ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, section_index);
@@ -976,9 +993,7 @@ static void netvsc_send_recv_completion(struct hv_device *device,
struct nvsp_message recvcompMessage;
int retries = 0;
int ret;
- struct net_device *ndev;
-
- ndev = net_device->ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
recvcompMessage.hdr.msg_type =
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE;
@@ -1025,11 +1040,9 @@ static void netvsc_receive(struct netvsc_device *net_device,
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
- struct net_device *ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
void *data;
- ndev = net_device->ndev;
-
/*
* All inbound packets other than send completion should be xfer page
* packet
@@ -1085,14 +1098,13 @@ static void netvsc_send_table(struct hv_device *hdev,
struct nvsp_message *nvmsg)
{
struct netvsc_device *nvscdev;
- struct net_device *ndev;
+ struct net_device *ndev = hv_get_drvdata(hdev);
int i;
u32 count, *tab;
nvscdev = get_outbound_net_device(hdev);
if (!nvscdev)
return;
- ndev = nvscdev->ndev;
count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
@@ -1151,7 +1163,7 @@ void netvsc_channel_cb(void *context)
net_device = get_inbound_net_device(device);
if (!net_device)
return;
- ndev = net_device->ndev;
+ ndev = hv_get_drvdata(device);
buffer = get_per_channel_state(channel);
do {
@@ -1224,30 +1236,19 @@ void netvsc_channel_cb(void *context)
*/
int netvsc_device_add(struct hv_device *device, void *additional_info)
{
- int ret = 0;
+ int i, ret = 0;
int ring_size =
((struct netvsc_device_info *)additional_info)->ring_size;
struct netvsc_device *net_device;
- struct net_device *ndev;
+ struct net_device *ndev = hv_get_drvdata(device);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
- net_device = alloc_net_device(device);
+ net_device = alloc_net_device();
if (!net_device)
return -ENOMEM;
net_device->ring_size = ring_size;
- /*
- * Coming into this function, struct net_device * is
- * registered as the driver private data.
- * In alloc_net_device(), we register struct netvsc_device *
- * as the driver private data and stash away struct net_device *
- * in struct netvsc_device *.
- */
- ndev = net_device->ndev;
-
- /* Add netvsc_device context to netvsc_device */
- net_device->nd_ctx = netdev_priv(ndev);
-
/* Initialize the NetVSC channel extension */
init_completion(&net_device->channel_init_wait);
@@ -1266,7 +1267,19 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
/* Channel is opened */
pr_info("hv_netvsc channel opened successfully\n");
- net_device->chn_table[0] = device->channel;
+ /* If we're reopening the device we may have multiple queues, fill the
+ * chn_table with the default channel to use it before subchannels are
+ * opened.
+ */
+ for (i = 0; i < VRSS_CHANNEL_MAX; i++)
+ net_device->chn_table[i] = device->channel;
+
+ /* Writing the nvdev pointer unlocks netvsc_send(); make sure chn_table
+ * is populated first.
+ */
+ wmb();
+
+ net_device_ctx->nvdev = net_device;
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index b8121eba3..6a69b5cc9 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -67,18 +67,19 @@ static void do_set_multicast(struct work_struct *w)
{
struct net_device_context *ndevctx =
container_of(w, struct net_device_context, work);
- struct netvsc_device *nvdev;
+ struct hv_device *device_obj = ndevctx->device_ctx;
+ struct net_device *ndev = hv_get_drvdata(device_obj);
+ struct netvsc_device *nvdev = ndevctx->nvdev;
struct rndis_device *rdev;
- nvdev = hv_get_drvdata(ndevctx->device_ctx);
- if (nvdev == NULL || nvdev->ndev == NULL)
+ if (!nvdev)
return;
rdev = nvdev->extension;
if (rdev == NULL)
return;
- if (nvdev->ndev->flags & IFF_PROMISC)
+ if (ndev->flags & IFF_PROMISC)
rndis_filter_set_packet_filter(rdev,
NDIS_PACKET_TYPE_PROMISCUOUS);
else
@@ -99,7 +100,7 @@ static int netvsc_open(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *device_obj = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev;
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
struct rndis_device *rdev;
int ret = 0;
@@ -114,7 +115,6 @@ static int netvsc_open(struct net_device *net)
netif_tx_wake_all_queues(net);
- nvdev = hv_get_drvdata(device_obj);
rdev = nvdev->extension;
if (!rdev->link_state)
netif_carrier_on(net);
@@ -126,7 +126,7 @@ static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *device_obj = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev = hv_get_drvdata(device_obj);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
int ret;
u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
struct vmbus_channel *chn;
@@ -205,8 +205,7 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct hv_device *hdev = net_device_ctx->device_ctx;
- struct netvsc_device *nvsc_dev = hv_get_drvdata(hdev);
+ struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
u32 hash;
u16 q_idx = 0;
@@ -580,7 +579,6 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
struct net_device *net;
struct net_device_context *ndev_ctx;
- struct netvsc_device *net_device;
struct netvsc_reconfig *event;
unsigned long flags;
@@ -590,8 +588,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
return;
- net_device = hv_get_drvdata(device_obj);
- net = net_device->ndev;
+ net = hv_get_drvdata(device_obj);
if (!net || net->reg_state != NETREG_REGISTERED)
return;
@@ -610,42 +607,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
-/*
- * netvsc_recv_callback - Callback when we receive a packet from the
- * "wire" on the specified device.
- */
-int netvsc_recv_callback(struct hv_device *device_obj,
+
+static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct hv_netvsc_packet *packet,
- void **data,
struct ndis_tcp_ip_checksum_info *csum_info,
- struct vmbus_channel *channel,
- u16 vlan_tci)
+ void *data, u16 vlan_tci)
{
- struct net_device *net;
- struct net_device_context *net_device_ctx;
struct sk_buff *skb;
- struct netvsc_stats *rx_stats;
- net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
- if (!net || net->reg_state != NETREG_REGISTERED) {
- return NVSP_STAT_FAIL;
- }
- net_device_ctx = netdev_priv(net);
- rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
-
- /* Allocate a skb - TODO direct I/O to pages? */
skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
- if (unlikely(!skb)) {
- ++net->stats.rx_dropped;
- return NVSP_STAT_FAIL;
- }
+ if (!skb)
+ return skb;
/*
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- memcpy(skb_put(skb, packet->total_data_buflen), *data,
- packet->total_data_buflen);
+ memcpy(skb_put(skb, packet->total_data_buflen), data,
+ packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
if (csum_info) {
@@ -663,6 +642,74 @@ int netvsc_recv_callback(struct hv_device *device_obj,
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tci);
+ return skb;
+}
+
+/*
+ * netvsc_recv_callback - Callback when we receive a packet from the
+ * "wire" on the specified device.
+ */
+int netvsc_recv_callback(struct hv_device *device_obj,
+ struct hv_netvsc_packet *packet,
+ void **data,
+ struct ndis_tcp_ip_checksum_info *csum_info,
+ struct vmbus_channel *channel,
+ u16 vlan_tci)
+{
+ struct net_device *net = hv_get_drvdata(device_obj);
+ struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct sk_buff *skb;
+ struct sk_buff *vf_skb;
+ struct netvsc_stats *rx_stats;
+ struct netvsc_device *netvsc_dev = net_device_ctx->nvdev;
+ u32 bytes_recvd = packet->total_data_buflen;
+ int ret = 0;
+
+ if (!net || net->reg_state != NETREG_REGISTERED)
+ return NVSP_STAT_FAIL;
+
+ if (READ_ONCE(netvsc_dev->vf_inject)) {
+ atomic_inc(&netvsc_dev->vf_use_cnt);
+ if (!READ_ONCE(netvsc_dev->vf_inject)) {
+ /*
+ * We raced; just move on.
+ */
+ atomic_dec(&netvsc_dev->vf_use_cnt);
+ goto vf_injection_done;
+ }
+
+ /*
+ * Inject this packet into the VF interface.
+ * On Hyper-V, multicast and broadcast packets
+ * are only delivered on the synthetic interface
+ * (after subjecting these to policy filters on
+ * the host). Deliver these via the VF interface
+ * in the guest.
+ */
+ vf_skb = netvsc_alloc_recv_skb(netvsc_dev->vf_netdev, packet,
+ csum_info, *data, vlan_tci);
+ if (vf_skb != NULL) {
+ ++netvsc_dev->vf_netdev->stats.rx_packets;
+ netvsc_dev->vf_netdev->stats.rx_bytes += bytes_recvd;
+ netif_receive_skb(vf_skb);
+ } else {
+ ++net->stats.rx_dropped;
+ ret = NVSP_STAT_FAIL;
+ }
+ atomic_dec(&netvsc_dev->vf_use_cnt);
+ return ret;
+ }
+
+vf_injection_done:
+ net_device_ctx = netdev_priv(net);
+ rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
+
+ /* Allocate a skb - TODO direct I/O to pages? */
+ skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
+ if (unlikely(!skb)) {
+ ++net->stats.rx_dropped;
+ return NVSP_STAT_FAIL;
+ }
skb_record_rx_queue(skb, channel->
offermsg.offer.sub_channel_index);
@@ -692,8 +739,7 @@ static void netvsc_get_channels(struct net_device *net,
struct ethtool_channels *channel)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
- struct hv_device *dev = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev = hv_get_drvdata(dev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
if (nvdev) {
channel->max_combined = nvdev->max_chn;
@@ -706,14 +752,14 @@ static int netvsc_set_channels(struct net_device *net,
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *dev = net_device_ctx->device_ctx;
- struct netvsc_device *nvdev = hv_get_drvdata(dev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
struct netvsc_device_info device_info;
u32 num_chn;
u32 max_chn;
int ret = 0;
bool recovering = false;
- if (!nvdev || nvdev->destroy)
+ if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
num_chn = nvdev->num_chn;
@@ -742,14 +788,11 @@ static int netvsc_set_channels(struct net_device *net,
goto out;
do_set:
- nvdev->start_remove = true;
+ net_device_ctx->start_remove = true;
rndis_filter_device_remove(dev);
nvdev->num_chn = channels->combined_count;
- net_device_ctx->device_ctx = dev;
- hv_set_drvdata(dev, net);
-
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
device_info.ring_size = ring_size;
@@ -764,7 +807,7 @@ static int netvsc_set_channels(struct net_device *net,
goto recover;
}
- nvdev = hv_get_drvdata(dev);
+ nvdev = net_device_ctx->nvdev;
ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
if (ret) {
@@ -786,6 +829,9 @@ static int netvsc_set_channels(struct net_device *net,
out:
netvsc_open(net);
+ net_device_ctx->start_remove = false;
+ /* We may have missed link change notifications */
+ schedule_delayed_work(&net_device_ctx->dwork, 0);
return ret;
@@ -854,14 +900,14 @@ static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
struct net_device_context *ndevctx = netdev_priv(ndev);
- struct hv_device *hdev = ndevctx->device_ctx;
- struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct netvsc_device *nvdev = ndevctx->nvdev;
+ struct hv_device *hdev = ndevctx->device_ctx;
struct netvsc_device_info device_info;
int limit = ETH_DATA_LEN;
u32 num_chn;
int ret = 0;
- if (nvdev == NULL || nvdev->destroy)
+ if (ndevctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
@@ -876,14 +922,11 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
num_chn = nvdev->num_chn;
- nvdev->start_remove = true;
+ ndevctx->start_remove = true;
rndis_filter_device_remove(hdev);
ndev->mtu = mtu;
- ndevctx->device_ctx = hdev;
- hv_set_drvdata(hdev, ndev);
-
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.num_chn = num_chn;
@@ -892,6 +935,10 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
out:
netvsc_open(ndev);
+ ndevctx->start_remove = false;
+
+ /* We may have missed link change notifications */
+ schedule_delayed_work(&ndevctx->dwork, 0);
return ret;
}
@@ -1004,18 +1051,22 @@ static const struct net_device_ops device_ops = {
*/
static void netvsc_link_change(struct work_struct *w)
{
- struct net_device_context *ndev_ctx;
- struct net_device *net;
+ struct net_device_context *ndev_ctx =
+ container_of(w, struct net_device_context, dwork.work);
+ struct hv_device *device_obj = ndev_ctx->device_ctx;
+ struct net_device *net = hv_get_drvdata(device_obj);
struct netvsc_device *net_device;
struct rndis_device *rdev;
struct netvsc_reconfig *event = NULL;
bool notify = false, reschedule = false;
unsigned long flags, next_reconfig, delay;
- ndev_ctx = container_of(w, struct net_device_context, dwork.work);
- net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+ rtnl_lock();
+ if (ndev_ctx->start_remove)
+ goto out_unlock;
+
+ net_device = ndev_ctx->nvdev;
rdev = net_device->extension;
- net = net_device->ndev;
next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
if (time_is_after_jiffies(next_reconfig)) {
@@ -1026,7 +1077,7 @@ static void netvsc_link_change(struct work_struct *w)
delay = next_reconfig - jiffies;
delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
schedule_delayed_work(&ndev_ctx->dwork, delay);
- return;
+ goto out_unlock;
}
ndev_ctx->last_reconfig = jiffies;
@@ -1040,9 +1091,7 @@ static void netvsc_link_change(struct work_struct *w)
spin_unlock_irqrestore(&ndev_ctx->lock, flags);
if (!event)
- return;
-
- rtnl_lock();
+ goto out_unlock;
switch (event->event) {
/* Only the following events are possible due to the check in
@@ -1074,7 +1123,7 @@ static void netvsc_link_change(struct work_struct *w)
netif_tx_stop_all_queues(net);
event->event = RNDIS_STATUS_MEDIA_CONNECT;
spin_lock_irqsave(&ndev_ctx->lock, flags);
- list_add_tail(&event->list, &ndev_ctx->reconfig_events);
+ list_add(&event->list, &ndev_ctx->reconfig_events);
spin_unlock_irqrestore(&ndev_ctx->lock, flags);
reschedule = true;
}
@@ -1091,6 +1140,11 @@ static void netvsc_link_change(struct work_struct *w)
*/
if (reschedule)
schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
+
+ return;
+
+out_unlock:
+ rtnl_unlock();
}
static void netvsc_free_netdev(struct net_device *netdev)
@@ -1102,6 +1156,192 @@ static void netvsc_free_netdev(struct net_device *netdev)
free_netdev(netdev);
}
+static void netvsc_notify_peers(struct work_struct *wrk)
+{
+ struct garp_wrk *gwrk;
+
+ gwrk = container_of(wrk, struct garp_wrk, dwrk);
+
+ netdev_notify_peers(gwrk->netdev);
+
+ atomic_dec(&gwrk->netvsc_dev->vf_use_cnt);
+}
+
+static struct net_device *get_netvsc_net_device(char *mac)
+{
+ struct net_device *dev, *found = NULL;
+ int rtnl_locked;
+
+ rtnl_locked = rtnl_trylock();
+
+ for_each_netdev(&init_net, dev) {
+ if (memcmp(dev->dev_addr, mac, ETH_ALEN) == 0) {
+ if (dev->netdev_ops != &device_ops)
+ continue;
+ found = dev;
+ break;
+ }
+ }
+ if (rtnl_locked)
+ rtnl_unlock();
+
+ return found;
+}
+
+static int netvsc_register_vf(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+ struct net_device_context *net_device_ctx;
+ struct netvsc_device *netvsc_dev;
+ const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+
+ if (eth_ops == NULL || eth_ops == &ethtool_ops)
+ return NOTIFY_DONE;
+
+ /*
+ * We will use the MAC address to locate the synthetic interface to
+ * associate with the VF interface. If we don't find a matching
+ * synthetic interface, move on.
+ */
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
+ if (netvsc_dev == NULL)
+ return NOTIFY_DONE;
+
+ netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
+ /*
+ * Take a reference on the module.
+ */
+ try_module_get(THIS_MODULE);
+ netvsc_dev->vf_netdev = vf_netdev;
+ return NOTIFY_OK;
+}
+
+static int netvsc_vf_up(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+ struct netvsc_device *netvsc_dev;
+ const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+ struct net_device_context *net_device_ctx;
+
+ if (eth_ops == &ethtool_ops)
+ return NOTIFY_DONE;
+
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
+
+ if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+ return NOTIFY_DONE;
+
+ netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
+ netvsc_dev->vf_inject = true;
+
+ /*
+ * Open the device before switching data path.
+ */
+ rndis_filter_open(net_device_ctx->device_ctx);
+
+ /*
+ * notify the host to switch the data path.
+ */
+ netvsc_switch_datapath(ndev, true);
+ netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);
+
+ netif_carrier_off(ndev);
+
+ /*
+ * Now notify peers. We are scheduling work to
+ * notify peers; take a reference to prevent
+ * the VF interface from vanishing.
+ */
+ atomic_inc(&netvsc_dev->vf_use_cnt);
+ net_device_ctx->gwrk.netdev = vf_netdev;
+ net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
+ schedule_work(&net_device_ctx->gwrk.dwrk);
+
+ return NOTIFY_OK;
+}
+
+static int netvsc_vf_down(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+ struct netvsc_device *netvsc_dev;
+ struct net_device_context *net_device_ctx;
+ const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+
+ if (eth_ops == &ethtool_ops)
+ return NOTIFY_DONE;
+
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
+
+ if ((netvsc_dev == NULL) || (netvsc_dev->vf_netdev == NULL))
+ return NOTIFY_DONE;
+
+ netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
+ netvsc_dev->vf_inject = false;
+ /*
+ * Wait for currently active users to
+ * drain out.
+ */
+
+ while (atomic_read(&netvsc_dev->vf_use_cnt) != 0)
+ udelay(50);
+ netvsc_switch_datapath(ndev, false);
+ netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
+ rndis_filter_close(net_device_ctx->device_ctx);
+ netif_carrier_on(ndev);
+ /*
+ * Notify peers.
+ */
+ atomic_inc(&netvsc_dev->vf_use_cnt);
+ net_device_ctx->gwrk.netdev = ndev;
+ net_device_ctx->gwrk.netvsc_dev = netvsc_dev;
+ schedule_work(&net_device_ctx->gwrk.dwrk);
+
+ return NOTIFY_OK;
+}
+
+static int netvsc_unregister_vf(struct net_device *vf_netdev)
+{
+ struct net_device *ndev;
+ struct netvsc_device *netvsc_dev;
+ const struct ethtool_ops *eth_ops = vf_netdev->ethtool_ops;
+ struct net_device_context *net_device_ctx;
+
+ if (eth_ops == &ethtool_ops)
+ return NOTIFY_DONE;
+
+ ndev = get_netvsc_net_device(vf_netdev->dev_addr);
+ if (!ndev)
+ return NOTIFY_DONE;
+
+ net_device_ctx = netdev_priv(ndev);
+ netvsc_dev = net_device_ctx->nvdev;
+ if (netvsc_dev == NULL)
+ return NOTIFY_DONE;
+ netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
+
+ netvsc_dev->vf_netdev = NULL;
+ module_put(THIS_MODULE);
+ return NOTIFY_OK;
+}
+
static int netvsc_probe(struct hv_device *dev,
const struct hv_vmbus_device_id *dev_id)
{
@@ -1138,8 +1378,12 @@ static int netvsc_probe(struct hv_device *dev,
}
hv_set_drvdata(dev, net);
+
+ net_device_ctx->start_remove = false;
+
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
+ INIT_WORK(&net_device_ctx->gwrk.dwrk, netvsc_notify_peers);
spin_lock_init(&net_device_ctx->lock);
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
@@ -1168,7 +1412,7 @@ static int netvsc_probe(struct hv_device *dev,
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
- nvdev = hv_get_drvdata(dev);
+ nvdev = net_device_ctx->nvdev;
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
@@ -1190,17 +1434,24 @@ static int netvsc_remove(struct hv_device *dev)
struct net_device_context *ndev_ctx;
struct netvsc_device *net_device;
- net_device = hv_get_drvdata(dev);
- net = net_device->ndev;
+ net = hv_get_drvdata(dev);
if (net == NULL) {
dev_err(&dev->device, "No net device to remove\n");
return 0;
}
- net_device->start_remove = true;
ndev_ctx = netdev_priv(net);
+ net_device = ndev_ctx->nvdev;
+
+ /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
+ * removing the device.
+ */
+ rtnl_lock();
+ ndev_ctx->start_remove = true;
+ rtnl_unlock();
+
cancel_delayed_work_sync(&ndev_ctx->dwork);
cancel_work_sync(&ndev_ctx->work);
@@ -1215,6 +1466,8 @@ static int netvsc_remove(struct hv_device *dev)
*/
rndis_filter_device_remove(dev);
+ hv_set_drvdata(dev, NULL);
+
netvsc_free_netdev(net);
return 0;
}
@@ -1235,19 +1488,58 @@ static struct hv_driver netvsc_drv = {
.remove = netvsc_remove,
};
+
+/*
+ * On Hyper-V, every VF interface is matched with a corresponding
+ * synthetic interface. The synthetic interface is presented first
+ * to the guest. When the corresponding VF instance is registered,
+ * we will take care of switching the data path.
+ */
+static int netvsc_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ return netvsc_register_vf(event_dev);
+ case NETDEV_UNREGISTER:
+ return netvsc_unregister_vf(event_dev);
+ case NETDEV_UP:
+ return netvsc_vf_up(event_dev);
+ case NETDEV_DOWN:
+ return netvsc_vf_down(event_dev);
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static struct notifier_block netvsc_netdev_notifier = {
+ .notifier_call = netvsc_netdev_event,
+};
+
static void __exit netvsc_drv_exit(void)
{
+ unregister_netdevice_notifier(&netvsc_netdev_notifier);
vmbus_driver_unregister(&netvsc_drv);
}
static int __init netvsc_drv_init(void)
{
+ int ret;
+
if (ring_size < RING_SIZE_MIN) {
ring_size = RING_SIZE_MIN;
pr_info("Increased ring_size to %d (min allowed)\n",
ring_size);
}
- return vmbus_driver_register(&netvsc_drv);
+ ret = vmbus_driver_register(&netvsc_drv);
+
+ if (ret)
+ return ret;
+
+ register_netdevice_notifier(&netvsc_netdev_notifier);
+ return 0;
}
MODULE_LICENSE("GPL");
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index c4e1e0408..97c292b7d 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -126,11 +126,7 @@ static void put_rndis_request(struct rndis_device *dev,
static void dump_rndis_message(struct hv_device *hv_dev,
struct rndis_message *rndis_msg)
{
- struct net_device *netdev;
- struct netvsc_device *net_device;
-
- net_device = hv_get_drvdata(hv_dev);
- netdev = net_device->ndev;
+ struct net_device *netdev = hv_get_drvdata(hv_dev);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
@@ -211,6 +207,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
struct hv_netvsc_packet *packet;
struct hv_page_buffer page_buf[2];
struct hv_page_buffer *pb = page_buf;
+ struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
/* Setup the packet to send it */
packet = &req->pkt;
@@ -236,7 +233,7 @@ static int rndis_filter_send_request(struct rndis_device *dev,
pb[0].len;
}
- ret = netvsc_send(dev->net_dev->dev, packet, NULL, &pb, NULL);
+ ret = netvsc_send(net_device_ctx->device_ctx, packet, NULL, &pb, NULL);
return ret;
}
@@ -262,9 +259,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
struct rndis_request *request = NULL;
bool found = false;
unsigned long flags;
- struct net_device *ndev;
-
- ndev = dev->net_dev->ndev;
+ struct net_device *ndev = dev->ndev;
spin_lock_irqsave(&dev->request_lock, flags);
list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -355,6 +350,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev,
struct ndis_pkt_8021q_info *vlan;
struct ndis_tcp_ip_checksum_info *csum_info;
u16 vlan_tci = 0;
+ struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
rndis_pkt = &msg->msg.pkt;
@@ -368,7 +364,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev,
* should be the data packet size plus the trailer padding size
*/
if (pkt->total_data_buflen < rndis_pkt->data_len) {
- netdev_err(dev->net_dev->ndev, "rndis message buffer "
+ netdev_err(dev->ndev, "rndis message buffer "
"overflow detected (got %u, min %u)"
"...dropping this message!\n",
pkt->total_data_buflen, rndis_pkt->data_len);
@@ -390,7 +386,7 @@ static int rndis_filter_receive_data(struct rndis_device *dev,
}
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
- return netvsc_recv_callback(dev->net_dev->dev, pkt, data,
+ return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data,
csum_info, channel, vlan_tci);
}
@@ -399,10 +395,11 @@ int rndis_filter_receive(struct hv_device *dev,
void **data,
struct vmbus_channel *channel)
{
- struct netvsc_device *net_dev = hv_get_drvdata(dev);
+ struct net_device *ndev = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_dev = net_device_ctx->nvdev;
struct rndis_device *rndis_dev;
struct rndis_message *rndis_msg;
- struct net_device *ndev;
int ret = 0;
if (!net_dev) {
@@ -410,8 +407,6 @@ int rndis_filter_receive(struct hv_device *dev,
goto exit;
}
- ndev = net_dev->ndev;
-
/* Make sure the rndis device state is initialized */
if (!net_dev->extension) {
netdev_err(ndev, "got rndis message but no rndis device - "
@@ -430,7 +425,7 @@ int rndis_filter_receive(struct hv_device *dev,
rndis_msg = *data;
- if (netif_msg_rx_err(net_dev->nd_ctx))
+ if (netif_msg_rx_err(net_device_ctx))
dump_rndis_message(dev, rndis_msg);
switch (rndis_msg->ndis_msg_type) {
@@ -550,9 +545,10 @@ static int rndis_filter_query_device_mac(struct rndis_device *dev)
int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac)
{
- struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct net_device *ndev = hv_get_drvdata(hdev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
struct rndis_device *rdev = nvdev->extension;
- struct net_device *ndev = nvdev->ndev;
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_config_parameter_info *cpi;
@@ -629,9 +625,10 @@ static int
rndis_filter_set_offload_params(struct hv_device *hdev,
struct ndis_offload_params *req_offloads)
{
- struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct net_device *ndev = hv_get_drvdata(hdev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
struct rndis_device *rdev = nvdev->extension;
- struct net_device *ndev = nvdev->ndev;
struct rndis_request *request;
struct rndis_set_request *set;
struct ndis_offload_params *offload_params;
@@ -703,7 +700,7 @@ u8 netvsc_hash_key[HASH_KEYLEN] = {
static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
{
- struct net_device *ndev = rdev->net_dev->ndev;
+ struct net_device *ndev = rdev->ndev;
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
@@ -799,9 +796,7 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
u32 status;
int ret;
unsigned long t;
- struct net_device *ndev;
-
- ndev = dev->net_dev->ndev;
+ struct net_device *ndev = dev->ndev;
request = get_rndis_request(dev, RNDIS_MSG_SET,
RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
@@ -856,7 +851,8 @@ static int rndis_filter_init_device(struct rndis_device *dev)
u32 status;
int ret;
unsigned long t;
- struct netvsc_device *nvdev = dev->net_dev;
+ struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
request = get_rndis_request(dev, RNDIS_MSG_INIT,
RNDIS_MESSAGE_SIZE(struct rndis_initialize_request));
@@ -879,7 +875,6 @@ static int rndis_filter_init_device(struct rndis_device *dev)
goto cleanup;
}
-
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
@@ -910,8 +905,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
{
struct rndis_request *request;
struct rndis_halt_request *halt;
- struct netvsc_device *nvdev = dev->net_dev;
- struct hv_device *hdev = nvdev->dev;
+ struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
+ struct hv_device *hdev = net_device_ctx->device_ctx;
ulong flags;
/* Attempt to do a rndis device halt */
@@ -979,13 +975,14 @@ static int rndis_filter_close_device(struct rndis_device *dev)
static void netvsc_sc_open(struct vmbus_channel *new_sc)
{
- struct netvsc_device *nvscdev;
+ struct net_device *ndev =
+ hv_get_drvdata(new_sc->primary_channel->device_obj);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *nvscdev = net_device_ctx->nvdev;
u16 chn_index = new_sc->offermsg.offer.sub_channel_index;
int ret;
unsigned long flags;
- nvscdev = hv_get_drvdata(new_sc->primary_channel->device_obj);
-
if (chn_index >= nvscdev->num_chn)
return;
@@ -1010,6 +1007,8 @@ int rndis_filter_device_add(struct hv_device *dev,
void *additional_info)
{
int ret;
+ struct net_device *net = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct netvsc_device_info *device_info = additional_info;
@@ -1040,16 +1039,15 @@ int rndis_filter_device_add(struct hv_device *dev,
return ret;
}
-
/* Initialize the rndis device */
- net_device = hv_get_drvdata(dev);
+ net_device = net_device_ctx->nvdev;
net_device->max_chn = 1;
net_device->num_chn = 1;
spin_lock_init(&net_device->sc_lock);
net_device->extension = rndis_device;
- rndis_device->net_dev = net_device;
+ rndis_device->ndev = net;
/* Send the rndis initialization message */
ret = rndis_filter_init_device(rndis_device);
@@ -1063,8 +1061,8 @@ int rndis_filter_device_add(struct hv_device *dev,
ret = rndis_filter_query_device(rndis_device,
RNDIS_OID_GEN_MAXIMUM_FRAME_SIZE,
&mtu, &size);
- if (ret == 0 && size == sizeof(u32) && mtu < net_device->ndev->mtu)
- net_device->ndev->mtu = mtu;
+ if (ret == 0 && size == sizeof(u32) && mtu < net->mtu)
+ net->mtu = mtu;
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device);
@@ -1198,7 +1196,9 @@ err_dev_remv:
void rndis_filter_device_remove(struct hv_device *dev)
{
- struct netvsc_device *net_dev = hv_get_drvdata(dev);
+ struct net_device *ndev = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_dev = net_device_ctx->nvdev;
struct rndis_device *rndis_dev = net_dev->extension;
unsigned long t;
@@ -1224,20 +1224,30 @@ void rndis_filter_device_remove(struct hv_device *dev)
int rndis_filter_open(struct hv_device *dev)
{
- struct netvsc_device *net_device = hv_get_drvdata(dev);
+ struct net_device *ndev = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
if (!net_device)
return -EINVAL;
+ if (atomic_inc_return(&net_device->open_cnt) != 1)
+ return 0;
+
return rndis_filter_open_device(net_device->extension);
}
int rndis_filter_close(struct hv_device *dev)
{
- struct netvsc_device *nvdev = hv_get_drvdata(dev);
+ struct net_device *ndev = hv_get_drvdata(dev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+ struct netvsc_device *nvdev = net_device_ctx->nvdev;
if (!nvdev)
return -EINVAL;
+ if (atomic_dec_return(&nvdev->open_cnt) != 0)
+ return 0;
+
return rndis_filter_close_device(nvdev->extension);
}
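/* A minimal sketch of the lookup chain the rndis_filter hunks above
 * converge on, now that hv_get_drvdata() yields the net_device rather
 * than the netvsc_device (the helper name is illustrative):
 */
static struct netvsc_device *example_get_nvdev(struct hv_device *dev)
{
	struct net_device *ndev = hv_get_drvdata(dev);
	struct net_device_context *ctx = netdev_priv(ndev);

	/* hv_device -> net_device -> net_device_context -> netvsc_device */
	return ctx->nvdev;
}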
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index d32920688..83c94fba2 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -915,7 +915,6 @@ static void adf7242_debug(u8 irq1)
(stat & 0xf) == RC_STATUS_PHY_RDY ? "RC_STATUS_PHY_RDY" : "",
(stat & 0xf) == RC_STATUS_RX ? "RC_STATUS_RX" : "",
(stat & 0xf) == RC_STATUS_TX ? "RC_STATUS_TX" : "");
- }
#endif
}
@@ -1026,6 +1025,7 @@ static int adf7242_hw_init(struct adf7242_local *lp)
if (ret) {
dev_err(&lp->spi->dev,
"upload firmware failed with %d\n", ret);
+ release_firmware(fw);
return ret;
}
@@ -1033,6 +1033,7 @@ static int adf7242_hw_init(struct adf7242_local *lp)
if (ret) {
dev_err(&lp->spi->dev,
"verify firmware failed with %d\n", ret);
+ release_firmware(fw);
return ret;
}
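/* Both adf7242_hw_init() fixes above plug the same leak: the struct
 * firmware returned by request_firmware() (linux/firmware.h) must be
 * released on every exit path, not only on success. A minimal sketch
 * of the pattern, with a hypothetical upload helper standing in for
 * the driver-specific work:
 */
static int example_load_fw(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)
		return ret;

	ret = example_upload(dev, fw->data, fw->size);	/* hypothetical */

	release_firmware(fw);	/* unconditional: success and error alike */
	return ret;
}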
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index cb9e9fe6d..9f10da60e 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1340,7 +1340,7 @@ static struct at86rf2xx_chip_data at86rf233_data = {
.t_off_to_aack = 80,
.t_off_to_tx_on = 80,
.t_off_to_sleep = 35,
- .t_sleep_to_off = 210,
+ .t_sleep_to_off = 1000,
.t_frame = 4096,
.t_p_ack = 545,
.rssi_base_val = -91,
@@ -1355,7 +1355,7 @@ static struct at86rf2xx_chip_data at86rf231_data = {
.t_off_to_aack = 110,
.t_off_to_tx_on = 110,
.t_off_to_sleep = 35,
- .t_sleep_to_off = 380,
+ .t_sleep_to_off = 1000,
.t_frame = 4096,
.t_p_ack = 545,
.rssi_base_val = -91,
@@ -1370,7 +1370,7 @@ static struct at86rf2xx_chip_data at86rf212_data = {
.t_off_to_aack = 200,
.t_off_to_tx_on = 200,
.t_off_to_sleep = 35,
- .t_sleep_to_off = 380,
+ .t_sleep_to_off = 1000,
.t_frame = 4096,
.t_p_ack = 545,
.rssi_base_val = -100,
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index b1cd865ad..52c9051f3 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -3,6 +3,8 @@
*
* Written 2013 by Werner Almesberger <werner@almesberger.net>
*
+ * Copyright (c) 2015 - 2016 Stefan Schmidt <stefan@datenfreihafen.org>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2
@@ -472,6 +474,76 @@ atusb_set_txpower(struct ieee802154_hw *hw, s32 mbm)
return -EINVAL;
}
+#define ATUSB_MAX_ED_LEVELS 0xF
+static const s32 atusb_ed_levels[ATUSB_MAX_ED_LEVELS + 1] = {
+ -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
+ -7100, -6900, -6700, -6500, -6300, -6100,
+};
+
+static int
+atusb_set_cca_mode(struct ieee802154_hw *hw, const struct wpan_phy_cca *cca)
+{
+ struct atusb *atusb = hw->priv;
+ u8 val;
+
+ /* mapping 802.15.4 to driver spec */
+ switch (cca->mode) {
+ case NL802154_CCA_ENERGY:
+ val = 1;
+ break;
+ case NL802154_CCA_CARRIER:
+ val = 2;
+ break;
+ case NL802154_CCA_ENERGY_CARRIER:
+ switch (cca->opt) {
+ case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
+ val = 3;
+ break;
+ case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
+ val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return atusb_write_subreg(atusb, SR_CCA_MODE, val);
+}
+
+static int
+atusb_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
+{
+ struct atusb *atusb = hw->priv;
+ u32 i;
+
+ for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+ if (hw->phy->supported.cca_ed_levels[i] == mbm)
+ return atusb_write_subreg(atusb, SR_CCA_ED_THRES, i);
+ }
+
+ return -EINVAL;
+}
+
+static int
+atusb_set_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, u8 retries)
+{
+ struct atusb *atusb = hw->priv;
+ int ret;
+
+ ret = atusb_write_subreg(atusb, SR_MIN_BE, min_be);
+ if (ret)
+ return ret;
+
+ ret = atusb_write_subreg(atusb, SR_MAX_BE, max_be);
+ if (ret)
+ return ret;
+
+ return atusb_write_subreg(atusb, SR_MAX_CSMA_RETRIES, retries);
+}
+
static int
atusb_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
{
@@ -508,6 +580,9 @@ static struct ieee802154_ops atusb_ops = {
.stop = atusb_stop,
.set_hw_addr_filt = atusb_set_hw_addr_filt,
.set_txpower = atusb_set_txpower,
+ .set_cca_mode = atusb_set_cca_mode,
+ .set_cca_ed_level = atusb_set_cca_ed_level,
+ .set_csma_params = atusb_set_csma_params,
.set_promiscuous_mode = atusb_set_promiscuous_mode,
};
@@ -636,9 +711,20 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
- IEEE802154_HW_PROMISCUOUS;
+ IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
+
+ hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
+ WPAN_PHY_FLAG_CCA_MODE;
+
+ hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+ BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+ hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+ BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+ hw->phy->supported.cca_ed_levels = atusb_ed_levels;
+ hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(atusb_ed_levels);
- hw->phy->flags = WPAN_PHY_FLAG_TXPOWER;
+ hw->phy->cca.mode = NL802154_CCA_ENERGY;
hw->phy->current_page = 0;
hw->phy->current_channel = 11; /* reset default */
@@ -647,6 +733,7 @@ static int atusb_probe(struct usb_interface *interface,
hw->phy->supported.tx_powers_size = ARRAY_SIZE(atusb_powers);
hw->phy->transmit_power = hw->phy->supported.tx_powers[0];
ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+ hw->phy->cca_ed_level = hw->phy->supported.cca_ed_levels[7];
atusb_command(atusb, ATUSB_RF_RESET, 0);
atusb_get_and_show_chip(atusb);
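/* The atusb_ed_levels[] table above spans the sixteen CCA ED register
 * codes from -9100 mbm upward in 200 mbm (2 dB) steps, so the
 * probe-time default of index 7 corresponds to -7700 mbm (-77 dBm).
 * A sketch of the same mapping derived arithmetically instead of
 * tabulated (illustrative helper, assuming linear steps):
 */
static s32 example_ed_level_mbm(u8 reg_code)
{
	return -9100 + 200 * reg_code;	/* reg_code in 0..15 */
}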
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index 764a2bddf..f446db828 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -61,6 +61,7 @@
#define REG_TXBCON0 0x1A
#define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */
#define BIT_TXNTRIG BIT(0)
+#define BIT_TXNSECEN BIT(1)
#define BIT_TXNACKREQ BIT(2)
#define REG_TXG1CON 0x1C
@@ -85,10 +86,13 @@
#define REG_INTSTAT 0x31 /* Interrupt Status */
#define BIT_TXNIF BIT(0)
#define BIT_RXIF BIT(3)
+#define BIT_SECIF BIT(4)
+#define BIT_SECIGNORE BIT(7)
#define REG_INTCON 0x32 /* Interrupt Control */
#define BIT_TXNIE BIT(0)
#define BIT_RXIE BIT(3)
+#define BIT_SECIE BIT(4)
#define REG_GPIO 0x33 /* GPIO */
#define REG_TRISGPIO 0x34 /* GPIO direction */
@@ -548,6 +552,9 @@ static void write_tx_buf_complete(void *context)
u8 val = BIT_TXNTRIG;
int ret;
+ if (ieee802154_is_secen(fc))
+ val |= BIT_TXNSECEN;
+
if (ieee802154_is_ackreq(fc))
val |= BIT_TXNACKREQ;
@@ -616,7 +623,7 @@ static int mrf24j40_start(struct ieee802154_hw *hw)
/* Clear TXNIE and RXIE. Enable interrupts */
return regmap_update_bits(devrec->regmap_short, REG_INTCON,
- BIT_TXNIE | BIT_RXIE, 0);
+ BIT_TXNIE | BIT_RXIE | BIT_SECIE, 0);
}
static void mrf24j40_stop(struct ieee802154_hw *hw)
@@ -1025,6 +1032,11 @@ static void mrf24j40_intstat_complete(void *context)
enable_irq(devrec->spi->irq);
+ /* Ignore Rx security decryption */
+ if (intstat & BIT_SECIF)
+ regmap_write_async(devrec->regmap_short, REG_SECCON0,
+ BIT_SECIGNORE);
+
/* Check for TX complete */
if (intstat & BIT_TXNIF)
ieee802154_xmit_complete(devrec->hw, devrec->tx_skb, false);
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index cc56fac3c..66c0eeafc 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -196,6 +196,7 @@ static const struct net_device_ops ifb_netdev_ops = {
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
+ NETIF_F_GSO_ENCAP_ALL | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_TX)
@@ -224,6 +225,8 @@ static void ifb_setup(struct net_device *dev)
dev->tx_queue_len = TX_Q_LIMIT;
dev->features |= IFB_FEATURES;
+ dev->hw_features |= dev->features;
+ dev->hw_enc_features |= dev->features;
dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
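/* Mirroring IFB_FEATURES into hw_features and hw_enc_features has a
 * distinct meaning per field: features is what is enabled right now,
 * hw_features is what the user may toggle with "ethtool -K", and
 * hw_enc_features is what remains usable on encapsulated packets.
 * The relationship as set up in ifb_setup(), annotated:
 *
 *	dev->features        |= IFB_FEATURES;	 // active offloads
 *	dev->hw_features     |= dev->features;	 // ethtool-toggleable
 *	dev->hw_enc_features |= dev->features;	 // encapsulated traffic
 */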
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 57941d3f4..1c4d395fb 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -113,6 +113,7 @@ static int ipvlan_init(struct net_device *dev)
{
struct ipvl_dev *ipvlan = netdev_priv(dev);
const struct net_device *phy_dev = ipvlan->phy_dev;
+ struct ipvl_port *port = ipvlan->port;
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
@@ -128,6 +129,8 @@ static int ipvlan_init(struct net_device *dev)
if (!ipvlan->pcpu_stats)
return -ENOMEM;
+ port->count += 1;
+
return 0;
}
@@ -481,27 +484,21 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
dev->priv_flags |= IFF_IPVLAN_SLAVE;
- port->count += 1;
err = register_netdevice(dev);
if (err < 0)
- goto ipvlan_destroy_port;
+ return err;
err = netdev_upper_dev_link(phy_dev, dev);
- if (err)
- goto ipvlan_destroy_port;
+ if (err) {
+ unregister_netdevice(dev);
+ return err;
+ }
list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
ipvlan_set_port_mode(port, mode);
netif_stacked_transfer_operstate(phy_dev, dev);
return 0;
-
-ipvlan_destroy_port:
- port->count -= 1;
- if (!port->count)
- ipvlan_port_destroy(phy_dev);
-
- return err;
}
static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
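/* With the reference moved into ipvlan_init(), the count is taken
 * exactly when register_netdevice() commits to the device: if
 * registration fails after ndo_init() succeeded, the core's unwind
 * calls ndo_uninit(), so the hand-rolled ipvlan_destroy_port label
 * (which could drop the count a second time) becomes unnecessary.
 * A sketch of the counterpart drop, modeled on the driver's existing
 * ipvlan_uninit():
 */
static void example_uninit(struct net_device *dev)	/* ndo_uninit */
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan->port;

	port->count -= 1;
	if (!port->count)
		ipvlan_port_destroy(port->dev);	/* last user tears down */
}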
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index a2c227bfb..e070e1222 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -394,12 +394,5 @@ config MCS_FIR
To compile it as a module, choose M here: the module will be called
mcs7780.
-config SH_IRDA
- tristate "SuperH IrDA driver"
- depends on IRDA
- depends on (ARCH_SHMOBILE || COMPILE_TEST) && HAS_IOMEM
- help
- Say Y here if your want to enable SuperH IrDA devices.
-
endmenu
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index be8ab5b9a..4c344433d 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o
obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
obj-$(CONFIG_MCS_FIR) += mcs7780.o
obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
-obj-$(CONFIG_SH_IRDA) += sh_irda.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 64bb44d5d..c285eafd3 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1427,7 +1427,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
/* Check for empty frame */
if (!skb->len) {
ali_ircc_change_speed(self, speed);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1533,7 +1533,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
/* Restore bank register */
switch_bank(iobase, BANK0);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
@@ -1946,7 +1946,7 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
/* Check for empty frame */
if (!skb->len) {
ali_ircc_change_speed(self, speed);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1966,7 +1966,7 @@ static netdev_tx_t ali_ircc_sir_hard_xmit(struct sk_buff *skb,
/* Turn on transmit finished interrupt. Will fire immediately! */
outb(UART_IER_THRI, iobase+UART_IER);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
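/* Every "dev->trans_start = jiffies" store in these IrDA drivers is
 * replaced by netif_trans_update(dev). The 4.7 helper is roughly the
 * following (the authoritative definition lives in
 * include/linux/netdevice.h); it timestamps TX queue 0 and skips the
 * store when the value is unchanged:
 */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;	/* TX watchdog timestamp */
}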
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 303c4bd26..be5bb0b7f 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -531,7 +531,7 @@ static void bfin_sir_send_work(struct work_struct *work)
bfin_sir_dma_tx_chars(dev);
#endif
bfin_sir_enable_tx(port);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
static int bfin_sir_hard_xmit(struct sk_buff *skb, struct net_device *dev)
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index a57bd1102..e3f108c61 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -429,7 +429,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
* do an extra memcpy and increment packet counters...
* Jean II */
irda_usb_change_speed_xbofs(self);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* Will netif_wake_queue() in callback */
goto drop;
}
@@ -526,7 +526,7 @@ static netdev_tx_t irda_usb_hard_xmit(struct sk_buff *skb,
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += skb->len;
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
}
spin_unlock_irqrestore(&self->lock, flags);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index dc0dbd8dd..aaecc3baa 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1253,7 +1253,7 @@ static void nsc_ircc_change_dongle_speed(int iobase, int speed, int dongle_id)
*/
static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
{
- struct net_device *dev = self->netdev;
+ struct net_device *dev;
__u8 mcr = MCR_SIR;
int iobase;
__u8 bank;
@@ -1263,6 +1263,7 @@ static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed)
IRDA_ASSERT(self != NULL, return 0;);
+ dev = self->netdev;
iobase = self->io.fir_base;
/* Update accounting for new speed */
@@ -1399,7 +1400,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
* to make sure packets gets through the
* proper xmit handler - Jean II */
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1424,7 +1425,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_sir(struct sk_buff *skb,
/* Restore bank register */
outb(bank, iobase+BSR);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
@@ -1470,7 +1471,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
* the speed change has been done.
* Jean II */
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -1553,7 +1554,7 @@ static netdev_tx_t nsc_ircc_hard_xmit_fir(struct sk_buff *skb,
/* Restore bank register */
outb(bank, iobase+BSR);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
deleted file mode 100644
index c96b46b2c..000000000
--- a/drivers/net/irda/sh_irda.c
+++ /dev/null
@@ -1,875 +0,0 @@
-/*
- * SuperH IrDA Driver
- *
- * Copyright (C) 2010 Renesas Solutions Corp.
- * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * Based on sh_sir.c
- * Copyright (C) 2009 Renesas Solutions Corp.
- * Copyright 2006-2009 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * CAUTION
- *
- * This driver is very simple.
- * So, it doesn't have below support now
- * - MIR/FIR support
- * - DMA transfer support
- * - FIFO mode support
- */
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/clk.h>
-#include <net/irda/wrapper.h>
-#include <net/irda/irda_device.h>
-
-#define DRIVER_NAME "sh_irda"
-
-#define __IRDARAM_LEN 0x1039
-
-#define IRTMR 0x1F00 /* Transfer mode */
-#define IRCFR 0x1F02 /* Configuration */
-#define IRCTR 0x1F04 /* IR control */
-#define IRTFLR 0x1F20 /* Transmit frame length */
-#define IRTCTR 0x1F22 /* Transmit control */
-#define IRRFLR 0x1F40 /* Receive frame length */
-#define IRRCTR 0x1F42 /* Receive control */
-#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
-#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
-#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
-#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
-#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
-#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
-#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
-#define CRCCTR 0x1F80 /* CRC engine control */
-#define CRCIR 0x1F86 /* CRC engine input data */
-#define CRCCR 0x1F8A /* CRC engine calculation */
-#define CRCOR 0x1F8E /* CRC engine output data */
-#define FIFOCP 0x1FC0 /* FIFO current pointer */
-#define FIFOFP 0x1FC2 /* FIFO follow pointer */
-#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
-#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
-#define FIFOSEL 0x1FC8 /* FIFO select */
-#define FIFORS 0x1FCA /* FIFO receive status */
-#define FIFORFL 0x1FCC /* FIFO receive frame length */
-#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
-#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
-#define BIFCTL 0x1FD2 /* BUS interface control */
-#define IRDARAM 0x0000 /* IrDA buffer RAM */
-#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
-
-/* IRTMR */
-#define TMD_MASK (0x3 << 14) /* Transfer Mode */
-#define TMD_SIR (0x0 << 14)
-#define TMD_MIR (0x3 << 14)
-#define TMD_FIR (0x2 << 14)
-
-#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
-#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
-#define SIM (1 << 0) /* SIR Interrupt Mask */
-#define xIM_MASK (FIFORIM | MIM | SIM)
-
-/* IRCFR */
-#define RTO_SHIFT 8 /* shift for Receive Timeout */
-#define RTO (0x3 << RTO_SHIFT)
-
-/* IRTCTR */
-#define ARMOD (1 << 15) /* Auto-Receive Mode */
-#define TE (1 << 0) /* Transmit Enable */
-
-/* IRRFLR */
-#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
-
-/* IRRCTR */
-#define RE (1 << 0) /* Receive Enable */
-
-/*
- * SIRISR, SIRIMR, SIRICR,
- * MFIRISR, MFIRIMR, MFIRICR
- */
-#define FRE (1 << 15) /* Frame Receive End */
-#define TROV (1 << 11) /* Transfer Area Overflow */
-#define xIR_9 (1 << 9)
-#define TOT xIR_9 /* for SIR Timeout */
-#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
-#define xIR_8 (1 << 8)
-#define FER xIR_8 /* for SIR Framing Error */
-#define CRCER xIR_8 /* for MIR/FIR CRC error */
-#define FTE (1 << 7) /* Frame Transmit End */
-#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
-
-/* SIRBCR */
-#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
-
-/* CRCCTR */
-#define CRC_RST (1 << 15) /* CRC Engine Reset */
-#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
-
-/* CRCIR */
-#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
-
-/************************************************************************
-
-
- enum / structure
-
-
-************************************************************************/
-enum sh_irda_mode {
- SH_IRDA_NONE = 0,
- SH_IRDA_SIR,
- SH_IRDA_MIR,
- SH_IRDA_FIR,
-};
-
-struct sh_irda_self;
-struct sh_irda_xir_func {
- int (*xir_fre) (struct sh_irda_self *self);
- int (*xir_trov) (struct sh_irda_self *self);
- int (*xir_9) (struct sh_irda_self *self);
- int (*xir_8) (struct sh_irda_self *self);
- int (*xir_fte) (struct sh_irda_self *self);
-};
-
-struct sh_irda_self {
- void __iomem *membase;
- unsigned int irq;
- struct platform_device *pdev;
-
- struct net_device *ndev;
-
- struct irlap_cb *irlap;
- struct qos_info qos;
-
- iobuff_t tx_buff;
- iobuff_t rx_buff;
-
- enum sh_irda_mode mode;
- spinlock_t lock;
-
- struct sh_irda_xir_func *xir_func;
-};
-
-/************************************************************************
-
-
- common function
-
-
-************************************************************************/
-static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&self->lock, flags);
- iowrite16(data, self->membase + offset);
- spin_unlock_irqrestore(&self->lock, flags);
-}
-
-static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
-{
- unsigned long flags;
- u16 ret;
-
- spin_lock_irqsave(&self->lock, flags);
- ret = ioread16(self->membase + offset);
- spin_unlock_irqrestore(&self->lock, flags);
-
- return ret;
-}
-
-static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
- u16 mask, u16 data)
-{
- unsigned long flags;
- u16 old, new;
-
- spin_lock_irqsave(&self->lock, flags);
- old = ioread16(self->membase + offset);
- new = (old & ~mask) | data;
- if (old != new)
- iowrite16(data, self->membase + offset);
- spin_unlock_irqrestore(&self->lock, flags);
-}
-
-/************************************************************************
-
-
- mode function
-
-
-************************************************************************/
-/*=====================================
- *
- * common
- *
- *=====================================*/
-static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
-{
- struct device *dev = &self->ndev->dev;
-
- sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
- dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
-}
-
-static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
-{
- struct device *dev = &self->ndev->dev;
-
- if (SH_IRDA_SIR != self->mode)
- interval = 0;
-
- if (interval < 0 || interval > 2) {
- dev_err(dev, "unsupported timeout interval\n");
- return -EINVAL;
- }
-
- sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
- return 0;
-}
-
-static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
-{
- struct device *dev = &self->ndev->dev;
- u16 val;
-
- if (baudrate < 0)
- return 0;
-
- if (SH_IRDA_SIR != self->mode) {
- dev_err(dev, "it is not SIR mode\n");
- return -EINVAL;
- }
-
- /*
- * Baud rate (bits/s) =
- * (48 MHz / 26) / (baud rate counter value + 1) x 16
- */
- val = (48000000 / 26 / 16 / baudrate) - 1;
- dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
-
- sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);
-
- return 0;
-}
-
-static int sh_irda_get_rcv_length(struct sh_irda_self *self)
-{
- return RFL_MASK & sh_irda_read(self, IRRFLR);
-}
-
-/*=====================================
- *
- * NONE MODE
- *
- *=====================================*/
-static int sh_irda_xir_fre(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- dev_err(dev, "none mode: frame recv\n");
- return 0;
-}
-
-static int sh_irda_xir_trov(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- dev_err(dev, "none mode: buffer ram over\n");
- return 0;
-}
-
-static int sh_irda_xir_9(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- dev_err(dev, "none mode: time over\n");
- return 0;
-}
-
-static int sh_irda_xir_8(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- dev_err(dev, "none mode: framing error\n");
- return 0;
-}
-
-static int sh_irda_xir_fte(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- dev_err(dev, "none mode: frame transmit end\n");
- return 0;
-}
-
-static struct sh_irda_xir_func sh_irda_xir_func = {
- .xir_fre = sh_irda_xir_fre,
- .xir_trov = sh_irda_xir_trov,
- .xir_9 = sh_irda_xir_9,
- .xir_8 = sh_irda_xir_8,
- .xir_fte = sh_irda_xir_fte,
-};
-
-/*=====================================
- *
- * MIR/FIR MODE
- *
- * MIR/FIR are not supported now
- *=====================================*/
-static struct sh_irda_xir_func sh_irda_mfir_func = {
- .xir_fre = sh_irda_xir_fre,
- .xir_trov = sh_irda_xir_trov,
- .xir_9 = sh_irda_xir_9,
- .xir_8 = sh_irda_xir_8,
- .xir_fte = sh_irda_xir_fte,
-};
-
-/*=====================================
- *
- * SIR MODE
- *
- *=====================================*/
-static int sh_irda_sir_fre(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- u16 data16;
- u8 *data = (u8 *)&data16;
- int len = sh_irda_get_rcv_length(self);
- int i, j;
-
- if (len > IRDARAM_LEN)
- len = IRDARAM_LEN;
-
- dev_dbg(dev, "frame recv length = %d\n", len);
-
- for (i = 0; i < len; i++) {
- j = i % 2;
- if (!j)
- data16 = sh_irda_read(self, IRDARAM + i);
-
- async_unwrap_char(self->ndev, &self->ndev->stats,
- &self->rx_buff, data[j]);
- }
- self->ndev->last_rx = jiffies;
-
- sh_irda_rcv_ctrl(self, 1);
-
- return 0;
-}
-
-static int sh_irda_sir_trov(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
-
- dev_err(dev, "buffer ram over\n");
- sh_irda_rcv_ctrl(self, 1);
- return 0;
-}
-
-static int sh_irda_sir_tot(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
-
- dev_err(dev, "time over\n");
- sh_irda_set_baudrate(self, 9600);
- sh_irda_rcv_ctrl(self, 1);
- return 0;
-}
-
-static int sh_irda_sir_fer(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
-
- dev_err(dev, "framing error\n");
- sh_irda_rcv_ctrl(self, 1);
- return 0;
-}
-
-static int sh_irda_sir_fte(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
-
- dev_dbg(dev, "frame transmit end\n");
- netif_wake_queue(self->ndev);
-
- return 0;
-}
-
-static struct sh_irda_xir_func sh_irda_sir_func = {
- .xir_fre = sh_irda_sir_fre,
- .xir_trov = sh_irda_sir_trov,
- .xir_9 = sh_irda_sir_tot,
- .xir_8 = sh_irda_sir_fer,
- .xir_fte = sh_irda_sir_fte,
-};
-
-static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
-{
- struct device *dev = &self->ndev->dev;
- struct sh_irda_xir_func *func;
- const char *name;
- u16 data;
-
- switch (mode) {
- case SH_IRDA_SIR:
- name = "SIR";
- data = TMD_SIR;
- func = &sh_irda_sir_func;
- break;
- case SH_IRDA_MIR:
- name = "MIR";
- data = TMD_MIR;
- func = &sh_irda_mfir_func;
- break;
- case SH_IRDA_FIR:
- name = "FIR";
- data = TMD_FIR;
- func = &sh_irda_mfir_func;
- break;
- default:
- name = "NONE";
- data = 0;
- func = &sh_irda_xir_func;
- break;
- }
-
- self->mode = mode;
- self->xir_func = func;
- sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
-
- dev_dbg(dev, "switch to %s mode", name);
-}
-
-/************************************************************************
-
-
- irq function
-
-
-************************************************************************/
-static void sh_irda_set_irq_mask(struct sh_irda_self *self)
-{
- u16 tmr_hole;
- u16 xir_reg;
-
- /* set all mask */
- sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
- sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
- sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);
-
- /* clear irq */
- sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
- sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);
-
- switch (self->mode) {
- case SH_IRDA_SIR:
- tmr_hole = SIM;
- xir_reg = SIRIMR;
- break;
- case SH_IRDA_MIR:
- case SH_IRDA_FIR:
- tmr_hole = MIM;
- xir_reg = MFIRIMR;
- break;
- default:
- tmr_hole = 0;
- xir_reg = 0;
- break;
- }
-
- /* open mask */
- if (xir_reg) {
- sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
- sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
- }
-}
-
-static irqreturn_t sh_irda_irq(int irq, void *dev_id)
-{
- struct sh_irda_self *self = dev_id;
- struct sh_irda_xir_func *func = self->xir_func;
- u16 isr = sh_irda_read(self, SIRISR);
-
- /* clear irq */
- sh_irda_write(self, SIRICR, isr);
-
- if (isr & FRE)
- func->xir_fre(self);
- if (isr & TROV)
- func->xir_trov(self);
- if (isr & xIR_9)
- func->xir_9(self);
- if (isr & xIR_8)
- func->xir_8(self);
- if (isr & FTE)
- func->xir_fte(self);
-
- return IRQ_HANDLED;
-}
-
-/************************************************************************
-
-
- CRC function
-
-
-************************************************************************/
-static void sh_irda_crc_reset(struct sh_irda_self *self)
-{
- sh_irda_write(self, CRCCTR, CRC_RST);
-}
-
-static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
-{
- sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
-}
-
-static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
-{
- return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
-}
-
-static u16 sh_irda_crc_out(struct sh_irda_self *self)
-{
- return sh_irda_read(self, CRCOR);
-}
-
-static int sh_irda_crc_init(struct sh_irda_self *self)
-{
- struct device *dev = &self->ndev->dev;
- int ret = -EIO;
- u16 val;
-
- sh_irda_crc_reset(self);
-
- sh_irda_crc_add(self, 0xCC);
- sh_irda_crc_add(self, 0xF5);
- sh_irda_crc_add(self, 0xF1);
- sh_irda_crc_add(self, 0xA7);
-
- val = sh_irda_crc_cnt(self);
- if (4 != val) {
- dev_err(dev, "CRC count error %x\n", val);
- goto crc_init_out;
- }
-
- val = sh_irda_crc_out(self);
- if (0x51DF != val) {
- dev_err(dev, "CRC result error%x\n", val);
- goto crc_init_out;
- }
-
- ret = 0;
-
-crc_init_out:
-
- sh_irda_crc_reset(self);
- return ret;
-}
-
-/************************************************************************
-
-
- iobuf function
-
-
-************************************************************************/
-static void sh_irda_remove_iobuf(struct sh_irda_self *self)
-{
- kfree(self->rx_buff.head);
-
- self->tx_buff.head = NULL;
- self->tx_buff.data = NULL;
- self->rx_buff.head = NULL;
- self->rx_buff.data = NULL;
-}
-
-static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
-{
- if (self->rx_buff.head ||
- self->tx_buff.head) {
- dev_err(&self->ndev->dev, "iobuff has already existed.");
- return -EINVAL;
- }
-
- /* rx_buff */
- self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
- if (!self->rx_buff.head)
- return -ENOMEM;
-
- self->rx_buff.truesize = rxsize;
- self->rx_buff.in_frame = FALSE;
- self->rx_buff.state = OUTSIDE_FRAME;
- self->rx_buff.data = self->rx_buff.head;
-
- /* tx_buff */
- self->tx_buff.head = self->membase + IRDARAM;
- self->tx_buff.truesize = IRDARAM_LEN;
-
- return 0;
-}
-
-/************************************************************************
-
-
- net_device_ops function
-
-
-************************************************************************/
-static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct sh_irda_self *self = netdev_priv(ndev);
- struct device *dev = &self->ndev->dev;
- int speed = irda_get_next_speed(skb);
- int ret;
-
- dev_dbg(dev, "hard xmit\n");
-
- netif_stop_queue(ndev);
- sh_irda_rcv_ctrl(self, 0);
-
- ret = sh_irda_set_baudrate(self, speed);
- if (ret < 0)
- goto sh_irda_hard_xmit_end;
-
- self->tx_buff.len = 0;
- if (skb->len) {
- unsigned long flags;
-
- spin_lock_irqsave(&self->lock, flags);
- self->tx_buff.len = async_wrap_skb(skb,
- self->tx_buff.head,
- self->tx_buff.truesize);
- spin_unlock_irqrestore(&self->lock, flags);
-
- if (self->tx_buff.len > self->tx_buff.truesize)
- self->tx_buff.len = self->tx_buff.truesize;
-
- sh_irda_write(self, IRTFLR, self->tx_buff.len);
- sh_irda_write(self, IRTCTR, ARMOD | TE);
- } else
- goto sh_irda_hard_xmit_end;
-
- dev_kfree_skb(skb);
-
- return 0;
-
-sh_irda_hard_xmit_end:
- sh_irda_set_baudrate(self, 9600);
- netif_wake_queue(self->ndev);
- sh_irda_rcv_ctrl(self, 1);
- dev_kfree_skb(skb);
-
- return ret;
-
-}
-
-static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
-{
- /*
- * FIXME
- *
- * This function is needed for irda framework.
- * But nothing to do now
- */
- return 0;
-}
-
-static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
-{
- struct sh_irda_self *self = netdev_priv(ndev);
-
- return &self->ndev->stats;
-}
-
-static int sh_irda_open(struct net_device *ndev)
-{
- struct sh_irda_self *self = netdev_priv(ndev);
- int err;
-
- pm_runtime_get_sync(&self->pdev->dev);
- err = sh_irda_crc_init(self);
- if (err)
- goto open_err;
-
- sh_irda_set_mode(self, SH_IRDA_SIR);
- sh_irda_set_timeout(self, 2);
- sh_irda_set_baudrate(self, 9600);
-
- self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
- if (!self->irlap) {
- err = -ENODEV;
- goto open_err;
- }
-
- netif_start_queue(ndev);
- sh_irda_rcv_ctrl(self, 1);
- sh_irda_set_irq_mask(self);
-
- dev_info(&ndev->dev, "opened\n");
-
- return 0;
-
-open_err:
- pm_runtime_put_sync(&self->pdev->dev);
-
- return err;
-}
-
-static int sh_irda_stop(struct net_device *ndev)
-{
- struct sh_irda_self *self = netdev_priv(ndev);
-
- /* Stop IrLAP */
- if (self->irlap) {
- irlap_close(self->irlap);
- self->irlap = NULL;
- }
-
- netif_stop_queue(ndev);
- pm_runtime_put_sync(&self->pdev->dev);
-
- dev_info(&ndev->dev, "stopped\n");
-
- return 0;
-}
-
-static const struct net_device_ops sh_irda_ndo = {
- .ndo_open = sh_irda_open,
- .ndo_stop = sh_irda_stop,
- .ndo_start_xmit = sh_irda_hard_xmit,
- .ndo_do_ioctl = sh_irda_ioctl,
- .ndo_get_stats = sh_irda_stats,
-};
-
-/************************************************************************
-
-
- platform_driver function
-
-
-************************************************************************/
-static int sh_irda_probe(struct platform_device *pdev)
-{
- struct net_device *ndev;
- struct sh_irda_self *self;
- struct resource *res;
- int irq;
- int err = -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq = platform_get_irq(pdev, 0);
- if (!res || irq < 0) {
- dev_err(&pdev->dev, "Not enough platform resources.\n");
- goto exit;
- }
-
- ndev = alloc_irdadev(sizeof(*self));
- if (!ndev)
- goto exit;
-
- self = netdev_priv(ndev);
- self->membase = ioremap_nocache(res->start, resource_size(res));
- if (!self->membase) {
- err = -ENXIO;
- dev_err(&pdev->dev, "Unable to ioremap.\n");
- goto err_mem_1;
- }
-
- err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
- if (err)
- goto err_mem_2;
-
- self->pdev = pdev;
- pm_runtime_enable(&pdev->dev);
-
- irda_init_max_qos_capabilies(&self->qos);
-
- ndev->netdev_ops = &sh_irda_ndo;
- ndev->irq = irq;
-
- self->ndev = ndev;
- self->qos.baud_rate.bits &= IR_9600; /* FIXME */
- self->qos.min_turn_time.bits = 1; /* 10 ms or more */
- spin_lock_init(&self->lock);
-
- irda_qos_bits_to_value(&self->qos);
-
- err = register_netdev(ndev);
- if (err)
- goto err_mem_4;
-
- platform_set_drvdata(pdev, ndev);
- err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self);
- if (err) {
- dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
- goto err_mem_4;
- }
-
- dev_info(&pdev->dev, "SuperH IrDA probed\n");
-
- goto exit;
-
-err_mem_4:
- pm_runtime_disable(&pdev->dev);
- sh_irda_remove_iobuf(self);
-err_mem_2:
- iounmap(self->membase);
-err_mem_1:
- free_netdev(ndev);
-exit:
- return err;
-}
-
-static int sh_irda_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct sh_irda_self *self = netdev_priv(ndev);
-
- if (!self)
- return 0;
-
- unregister_netdev(ndev);
- pm_runtime_disable(&pdev->dev);
- sh_irda_remove_iobuf(self);
- iounmap(self->membase);
- free_netdev(ndev);
-
- return 0;
-}
-
-static int sh_irda_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
- return 0;
-}
-
-static const struct dev_pm_ops sh_irda_pm_ops = {
- .runtime_suspend = sh_irda_runtime_nop,
- .runtime_resume = sh_irda_runtime_nop,
-};
-
-static struct platform_driver sh_irda_driver = {
- .probe = sh_irda_probe,
- .remove = sh_irda_remove,
- .driver = {
- .name = DRIVER_NAME,
- .pm = &sh_irda_pm_ops,
- },
-};
-
-module_platform_driver(sh_irda_driver);
-
-MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
-MODULE_DESCRIPTION("SuperH IrDA driver");
-MODULE_LICENSE("GPL");
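/* For reference, the SIR baud-rate formula in the driver deleted
 * above works out, with its integer arithmetic, as follows for the
 * 9600 baud default it programmed:
 *
 *	val = 48000000 / 26 / 16 / 9600 - 1
 *	    = 1846153 / 16 / 9600 - 1
 *	    = 115384 / 9600 - 1
 *	    = 12 - 1 = 11 (0x0b), written to SIRBCR under BRC_MASK
 */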
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index b455ffe88..dcf92ba80 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -862,7 +862,7 @@ static void smsc_ircc_timeout(struct net_device *dev)
spin_lock_irqsave(&self->lock, flags);
smsc_ircc_sir_start(self);
smsc_ircc_change_speed(self, self->io.speed);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&self->lock, flags);
}
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 83cc48a01..42da094b6 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -718,7 +718,7 @@ static void stir_send(struct stir_cb *stir, struct sk_buff *skb)
stir->netdev->stats.tx_packets++;
stir->netdev->stats.tx_bytes += skb->len;
- stir->netdev->trans_start = jiffies;
+ netif_trans_update(stir->netdev);
pr_debug("send %d (%d)\n", skb->len, wraplen);
if (usb_bulk_msg(stir->usbdev, usb_sndbulkpipe(stir->usbdev, 1),
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 6960d4cd3..ca4442a9d 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -774,7 +774,7 @@ static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
/* Check for empty frame */
if (!skb->len) {
via_ircc_change_speed(self, speed);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
} else
@@ -821,7 +821,7 @@ static netdev_tx_t via_ircc_hard_xmit_sir(struct sk_buff *skb,
RXStart(iobase, OFF);
TXStart(iobase, ON);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&self->lock, flags);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -849,7 +849,7 @@ static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
if ((speed != self->io.speed) && (speed != -1)) {
if (!skb->len) {
via_ircc_change_speed(self, speed);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
} else
@@ -869,7 +869,7 @@ static netdev_tx_t via_ircc_hard_xmit_fir(struct sk_buff *skb,
via_ircc_dma_xmit(self, iobase);
//F01 }
//F01 if (self->tx_fifo.free < (MAX_TX_WINDOW -1 )) netif_wake_queue(self->netdev);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb(skb);
spin_unlock_irqrestore(&self->lock, flags);
return NETDEV_TX_OK;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 8f3c55d03..a70b6c460 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -605,12 +605,41 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err)
dev_put(dev);
}
+static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
+ unsigned char **iv,
+ struct scatterlist **sg)
+{
+ size_t size, iv_offset, sg_offset;
+ struct aead_request *req;
+ void *tmp;
+
+ size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
+ iv_offset = size;
+ size += GCM_AES_IV_LEN;
+
+ size = ALIGN(size, __alignof__(struct scatterlist));
+ sg_offset = size;
+ size += sizeof(struct scatterlist) * (MAX_SKB_FRAGS + 1);
+
+ tmp = kmalloc(size, GFP_ATOMIC);
+ if (!tmp)
+ return NULL;
+
+ *iv = (unsigned char *)(tmp + iv_offset);
+ *sg = (struct scatterlist *)(tmp + sg_offset);
+ req = tmp;
+
+ aead_request_set_tfm(req, tfm);
+
+ return req;
+}
+
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
struct net_device *dev)
{
int ret;
- struct scatterlist sg[MAX_SKB_FRAGS + 1];
- unsigned char iv[GCM_AES_IV_LEN];
+ struct scatterlist *sg;
+ unsigned char *iv;
struct ethhdr *eth;
struct macsec_eth_header *hh;
size_t unprotected_len;
@@ -668,8 +697,6 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
macsec_fill_sectag(hh, secy, pn);
macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);
- macsec_fill_iv(iv, secy->sci, pn);
-
skb_put(skb, secy->icv_len);
if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
@@ -684,13 +711,15 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
return ERR_PTR(-EINVAL);
}
- req = aead_request_alloc(tx_sa->key.tfm, GFP_ATOMIC);
+ req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg);
if (!req) {
macsec_txsa_put(tx_sa);
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
}
+ macsec_fill_iv(iv, secy->sci, pn);
+
sg_init_table(sg, MAX_SKB_FRAGS + 1);
skb_to_sgvec(skb, sg, 0, skb->len);
@@ -861,7 +890,6 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err)
out:
macsec_rxsa_put(rx_sa);
dev_put(dev);
- return;
}
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
@@ -871,8 +899,8 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
struct macsec_secy *secy)
{
int ret;
- struct scatterlist sg[MAX_SKB_FRAGS + 1];
- unsigned char iv[GCM_AES_IV_LEN];
+ struct scatterlist *sg;
+ unsigned char *iv;
struct aead_request *req;
struct macsec_eth_header *hdr;
u16 icv_len = secy->icv_len;
@@ -882,7 +910,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
if (!skb)
return ERR_PTR(-ENOMEM);
- req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
+ req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg);
if (!req) {
kfree_skb(skb);
return ERR_PTR(-ENOMEM);
@@ -914,7 +942,6 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
}
macsec_skb_cb(skb)->req = req;
- macsec_skb_cb(skb)->rx_sa = rx_sa;
skb->dev = dev;
aead_request_set_callback(req, 0, macsec_decrypt_done, skb);
@@ -1141,6 +1168,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
}
}
+ macsec_skb_cb(skb)->rx_sa = rx_sa;
+
/* Disabled && !changed text => skip validation */
if (hdr->tci_an & MACSEC_TCI_C ||
secy->validate_frames != MACSEC_VALIDATE_DISABLED)
@@ -1234,7 +1263,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (!tfm || IS_ERR(tfm))
return NULL;
@@ -1408,9 +1437,10 @@ static sci_t nla_get_sci(const struct nlattr *nla)
return (__force sci_t)nla_get_u64(nla);
}
-static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value)
+static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
+ int padattr)
{
- return nla_put_u64(skb, attrtype, (__force u64)value);
+ return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}
static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
@@ -2143,16 +2173,36 @@ static int copy_rx_sc_stats(struct sk_buff *skb,
sum.InPktsUnusedSA += tmp.InPktsUnusedSA;
}
- if (nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED, sum.InOctetsValidated) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED, sum.InOctetsDecrypted) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED, sum.InPktsUnchecked) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED, sum.InPktsDelayed) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK, sum.InPktsOK) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID, sum.InPktsInvalid) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE, sum.InPktsLate) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID, sum.InPktsNotValid) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA, sum.InPktsNotUsingSA) ||
- nla_put_u64(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA, sum.InPktsUnusedSA))
+ if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
+ sum.InOctetsValidated,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
+ sum.InOctetsDecrypted,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
+ sum.InPktsUnchecked,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
+ sum.InPktsDelayed,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
+ sum.InPktsOK,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
+ sum.InPktsInvalid,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
+ sum.InPktsLate,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
+ sum.InPktsNotValid,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
+ sum.InPktsNotUsingSA,
+ MACSEC_RXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
+ sum.InPktsUnusedSA,
+ MACSEC_RXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
@@ -2181,10 +2231,18 @@ static int copy_tx_sc_stats(struct sk_buff *skb,
sum.OutOctetsEncrypted += tmp.OutOctetsEncrypted;
}
- if (nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED, sum.OutPktsProtected) ||
- nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED, sum.OutPktsEncrypted) ||
- nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED, sum.OutOctetsProtected) ||
- nla_put_u64(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED, sum.OutOctetsEncrypted))
+ if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
+ sum.OutPktsProtected,
+ MACSEC_TXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
+ sum.OutPktsEncrypted,
+ MACSEC_TXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
+ sum.OutOctetsProtected,
+ MACSEC_TXSC_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
+ sum.OutOctetsEncrypted,
+ MACSEC_TXSC_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
@@ -2217,14 +2275,30 @@ static int copy_secy_stats(struct sk_buff *skb,
sum.InPktsOverrun += tmp.InPktsOverrun;
}
- if (nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED, sum.OutPktsUntagged) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED, sum.InPktsUntagged) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG, sum.OutPktsTooLong) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG, sum.InPktsNoTag) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG, sum.InPktsBadTag) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI, sum.InPktsUnknownSCI) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI, sum.InPktsNoSCI) ||
- nla_put_u64(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN, sum.InPktsOverrun))
+ if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
+ sum.OutPktsUntagged,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
+ sum.InPktsUntagged,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
+ sum.OutPktsTooLong,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
+ sum.InPktsNoTag,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
+ sum.InPktsBadTag,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
+ sum.InPktsUnknownSCI,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
+ sum.InPktsNoSCI,
+ MACSEC_SECY_STATS_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
+ sum.InPktsOverrun,
+ MACSEC_SECY_STATS_ATTR_PAD))
return -EMSGSIZE;
return 0;
@@ -2238,9 +2312,11 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
if (!secy_nest)
return 1;
- if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
- nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID) ||
+ if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
+ MACSEC_SECY_ATTR_PAD) ||
+ nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
+ MACSEC_DEFAULT_CIPHER_ID,
+ MACSEC_SECY_ATTR_PAD) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2366,7 +2442,8 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
}
if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
- nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci)) {
+ nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
+ MACSEC_RXSC_ATTR_PAD)) {
nla_nest_cancel(skb, rxsc_nest);
nla_nest_cancel(skb, rxsc_list);
goto nla_put_failure;
@@ -2844,7 +2921,7 @@ static void macsec_free_netdev(struct net_device *dev)
static void macsec_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->tx_queue_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
dev->netdev_ops = &macsec_netdev_ops;
dev->destructor = macsec_free_netdev;
@@ -3171,9 +3248,9 @@ static struct net *macsec_get_link_net(const struct net_device *dev)
static size_t macsec_get_size(const struct net_device *dev)
{
return 0 +
- nla_total_size(8) + /* SCI */
+ nla_total_size_64bit(8) + /* SCI */
nla_total_size(1) + /* ICV_LEN */
- nla_total_size(8) + /* CIPHER_SUITE */
+ nla_total_size_64bit(8) + /* CIPHER_SUITE */
nla_total_size(4) + /* WINDOW */
nla_total_size(1) + /* ENCODING_SA */
nla_total_size(1) + /* ENCRYPT */
@@ -3192,10 +3269,11 @@ static int macsec_fill_info(struct sk_buff *skb,
struct macsec_secy *secy = &macsec_priv(dev)->secy;
struct macsec_tx_sc *tx_sc = &secy->tx_sc;
- if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
+ if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
+ IFLA_MACSEC_PAD) ||
nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
- nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE,
- MACSEC_DEFAULT_CIPHER_ID) ||
+ nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
+ MACSEC_DEFAULT_CIPHER_ID, IFLA_MACSEC_PAD) ||
nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
@@ -3313,6 +3391,7 @@ static void __exit macsec_exit(void)
genl_unregister_family(&macsec_fam);
rtnl_link_unregister(&macsec_link_ops);
unregister_netdevice_notifier(&macsec_notifier);
+ rcu_barrier();
}
module_init(macsec_init);
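
The rcu_barrier() added to macsec_exit() above matters for module unload: macsec frees objects via call_rcu(), and the barrier waits for all queued callbacks to run before the module text goes away. A generic sketch of that ordering, with a hypothetical unregister_things() standing in for the driver's teardown calls:

static void __exit mydrv_exit(void)
{
	unregister_things();	/* hypothetical: drop all external refs */
	rcu_barrier();		/* wait for pending call_rcu() callbacks */
}
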
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 2bcf1f321..cb01023ea 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -795,6 +795,7 @@ static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
const struct net_device *lowerdev = vlan->lowerdev;
+ struct macvlan_port *port = vlan->port;
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
@@ -812,6 +813,8 @@ static int macvlan_init(struct net_device *dev)
if (!vlan->pcpu_stats)
return -ENOMEM;
+ port->count += 1;
+
return 0;
}
@@ -1312,10 +1315,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
return err;
}
- port->count += 1;
err = register_netdevice(dev);
if (err < 0)
- goto destroy_port;
+ return err;
dev->priv_flags |= IFF_MACVLAN;
err = netdev_upper_dev_link(lowerdev, dev);
@@ -1330,10 +1332,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
unregister_netdev:
unregister_netdevice(dev);
-destroy_port:
- port->count -= 1;
- if (!port->count)
- macvlan_port_destroy(lowerdev);
return err;
}
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 9a35aa462..bd6720962 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -129,7 +129,18 @@ static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);
#define GOODCOPY_LEN 128
-static struct class *macvtap_class;
+static const void *macvtap_net_namespace(struct device *d)
+{
+ struct net_device *dev = to_net_dev(d->parent);
+ return dev_net(dev);
+}
+
+static struct class macvtap_class = {
+ .name = "macvtap",
+ .owner = THIS_MODULE,
+ .ns_type = &net_ns_type_operations,
+ .namespace = macvtap_net_namespace,
+};
static struct cdev macvtap_cdev;
static const struct proto_ops macvtap_socket_ops;
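
Switching macvtap from a class_create()-allocated class to a statically defined struct class is what makes room for the ns_type/namespace callbacks: with them, each tap device node is visible only in the network namespace of its parent device. The same pattern, sketched for a hypothetical driver class:

/* Sketch of a netns-aware device class (names hypothetical). The
 * namespace callback reports which net the device belongs to, and
 * net_ns_type_operations lets the driver core compare that against
 * an observer's namespace. */
static const void *mydrv_net_namespace(struct device *d)
{
	return dev_net(to_net_dev(d->parent));
}

static struct class mydrv_class = {
	.name      = "mydrv",
	.owner     = THIS_MODULE,
	.ns_type   = &net_ns_type_operations,
	.namespace = mydrv_net_namespace,
};
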
@@ -1278,10 +1289,12 @@ static int macvtap_device_event(struct notifier_block *unused,
struct device *classdev;
dev_t devt;
int err;
+ char tap_name[IFNAMSIZ];
if (dev->rtnl_link_ops != &macvtap_link_ops)
return NOTIFY_DONE;
+ snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
vlan = netdev_priv(dev);
switch (event) {
@@ -1295,16 +1308,24 @@ static int macvtap_device_event(struct notifier_block *unused,
return notifier_from_errno(err);
devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
- classdev = device_create(macvtap_class, &dev->dev, devt,
- dev, "tap%d", dev->ifindex);
+ classdev = device_create(&macvtap_class, &dev->dev, devt,
+ dev, tap_name);
if (IS_ERR(classdev)) {
macvtap_free_minor(vlan);
return notifier_from_errno(PTR_ERR(classdev));
}
+ err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
+ tap_name);
+ if (err)
+ return notifier_from_errno(err);
break;
case NETDEV_UNREGISTER:
+ /* vlan->minor == 0 if NETDEV_REGISTER above failed */
+ if (vlan->minor == 0)
+ break;
+ sysfs_remove_link(&dev->dev.kobj, tap_name);
devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
- device_destroy(macvtap_class, devt);
+ device_destroy(&macvtap_class, devt);
macvtap_free_minor(vlan);
break;
}
@@ -1330,11 +1351,9 @@ static int macvtap_init(void)
if (err)
goto out2;
- macvtap_class = class_create(THIS_MODULE, "macvtap");
- if (IS_ERR(macvtap_class)) {
- err = PTR_ERR(macvtap_class);
+ err = class_register(&macvtap_class);
+ if (err)
goto out3;
- }
err = register_netdevice_notifier(&macvtap_notifier_block);
if (err)
@@ -1349,7 +1368,7 @@ static int macvtap_init(void)
out5:
unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
- class_unregister(macvtap_class);
+ class_unregister(&macvtap_class);
out3:
cdev_del(&macvtap_cdev);
out2:
@@ -1363,7 +1382,7 @@ static void macvtap_exit(void)
{
rtnl_link_unregister(&macvtap_link_ops);
unregister_netdevice_notifier(&macvtap_notifier_block);
- class_unregister(macvtap_class);
+ class_unregister(&macvtap_class);
cdev_del(&macvtap_cdev);
unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
idr_destroy(&minor_idr);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 2afa61b51..91177a4a3 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -57,6 +57,7 @@
/* PHY CTRL bits */
#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+#define DP83867_PHYCR_FIFO_DEPTH_MASK (3 << 14)
/* RGMIIDCTL bits */
#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
@@ -133,8 +134,8 @@ static int dp83867_of_init(struct phy_device *phydev)
static int dp83867_config_init(struct phy_device *phydev)
{
struct dp83867_private *dp83867;
- int ret;
- u16 val, delay;
+ int ret, val;
+ u16 delay;
if (!phydev->priv) {
dp83867 = devm_kzalloc(&phydev->mdio.dev, sizeof(*dp83867),
@@ -151,8 +152,12 @@ static int dp83867_config_init(struct phy_device *phydev)
}
if (phy_interface_is_rgmii(phydev)) {
- ret = phy_write(phydev, MII_DP83867_PHYCTRL,
- (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+ val = phy_read(phydev, MII_DP83867_PHYCTRL);
+ if (val < 0)
+ return val;
+ val &= ~DP83867_PHYCR_FIFO_DEPTH_MASK;
+ val |= (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT);
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
if (ret)
return ret;
}
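
The dp83867 change above is a read-modify-write fix: the old code wrote the FIFO depth with a bare phy_write(), clobbering every other bit in PHYCTRL. The general MII pattern, sketched with hypothetical register and field names:

/* Read-modify-write of a single field in an MII register.
 * MY_REG / MY_FIELD_* are hypothetical; phy_read() returns a
 * negative errno on failure, hence the int (not u16) val. */
static int set_my_field(struct phy_device *phydev, u16 field)
{
	int val = phy_read(phydev, MY_REG);

	if (val < 0)
		return val;
	val &= ~MY_FIELD_MASK;
	val |= field << MY_FIELD_SHIFT;
	return phy_write(phydev, MY_REG, val);
}
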
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index fc07a8866..9ec7f7353 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/gpio.h>
+#include <linux/idr.h>
#define MII_REGS_NUM 29
@@ -255,7 +256,8 @@ int fixed_phy_add(unsigned int irq, int phy_addr,
memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM);
- fmb->mii_bus->irq[phy_addr] = irq;
+ if (irq != PHY_POLL)
+ fmb->mii_bus->irq[phy_addr] = irq;
fp->addr = phy_addr;
fp->status = *status;
@@ -285,6 +287,8 @@ err_regs:
}
EXPORT_SYMBOL_GPL(fixed_phy_add);
+static DEFINE_IDA(phy_fixed_ida);
+
static void fixed_phy_del(int phy_addr)
{
struct fixed_mdio_bus *fmb = &platform_fmb;
@@ -296,14 +300,12 @@ static void fixed_phy_del(int phy_addr)
if (gpio_is_valid(fp->link_gpio))
gpio_free(fp->link_gpio);
kfree(fp);
+ ida_simple_remove(&phy_fixed_ida, phy_addr);
return;
}
}
}
-static int phy_fixed_addr;
-static DEFINE_SPINLOCK(phy_fixed_addr_lock);
-
struct phy_device *fixed_phy_register(unsigned int irq,
struct fixed_phy_status *status,
int link_gpio,
@@ -314,21 +316,22 @@ struct phy_device *fixed_phy_register(unsigned int irq,
int phy_addr;
int ret;
+ if (!fmb->mii_bus || fmb->mii_bus->state != MDIOBUS_REGISTERED)
+ return ERR_PTR(-EPROBE_DEFER);
+
/* Get the next available PHY address, up to PHY_MAX_ADDR */
- spin_lock(&phy_fixed_addr_lock);
- if (phy_fixed_addr == PHY_MAX_ADDR) {
- spin_unlock(&phy_fixed_addr_lock);
- return ERR_PTR(-ENOSPC);
- }
- phy_addr = phy_fixed_addr++;
- spin_unlock(&phy_fixed_addr_lock);
+ phy_addr = ida_simple_get(&phy_fixed_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
+ if (phy_addr < 0)
+ return ERR_PTR(phy_addr);
ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
- if (ret < 0)
+ if (ret < 0) {
+ ida_simple_remove(&phy_fixed_ida, phy_addr);
return ERR_PTR(ret);
+ }
phy = get_phy_device(fmb->mii_bus, phy_addr, false);
- if (!phy || IS_ERR(phy)) {
+ if (IS_ERR(phy)) {
fixed_phy_del(phy_addr);
return ERR_PTR(-EINVAL);
}
@@ -430,6 +433,7 @@ static void __exit fixed_mdio_bus_exit(void)
list_del(&fp->node);
kfree(fp);
}
+ ida_destroy(&phy_fixed_ida);
}
module_exit(fixed_mdio_bus_exit);
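
The fixed_phy conversion replaces a forward-only counter under a spinlock with an IDA, so an address released by fixed_phy_del() can be handed out again instead of leaking address space. The allocate/release pairing in isolation:

/* Sketch of the IDA usage above: ida_simple_get() returns the
 * lowest free id in [0, PHY_MAX_ADDR) or a negative errno, and
 * ida_simple_remove() makes the id reusable. */
static DEFINE_IDA(example_ida);

static int example_get_addr(void)
{
	return ida_simple_get(&example_ida, 0, PHY_MAX_ADDR, GFP_KERNEL);
}

static void example_put_addr(int addr)
{
	ida_simple_remove(&example_ida, addr);
}
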
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index f6078376e..b9fde1bcf 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -80,23 +80,15 @@ static int lxt970_ack_interrupt(struct phy_device *phydev)
static int lxt970_config_intr(struct phy_device *phydev)
{
- int err;
-
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
+ return phy_write(phydev, MII_LXT970_IER, MII_LXT970_IER_IEN);
else
- err = phy_write(phydev, MII_LXT970_IER, 0);
-
- return err;
+ return phy_write(phydev, MII_LXT970_IER, 0);
}
static int lxt970_config_init(struct phy_device *phydev)
{
- int err;
-
- err = phy_write(phydev, MII_LXT970_CONFIG, 0);
-
- return err;
+ return phy_write(phydev, MII_LXT970_CONFIG, 0);
}
@@ -112,14 +104,10 @@ static int lxt971_ack_interrupt(struct phy_device *phydev)
static int lxt971_config_intr(struct phy_device *phydev)
{
- int err;
-
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
- err = phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
+ return phy_write(phydev, MII_LXT971_IER, MII_LXT971_IER_IEN);
else
- err = phy_write(phydev, MII_LXT971_IER, 0);
-
- return err;
+ return phy_write(phydev, MII_LXT971_IER, 0);
}
/*
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 280e8795b..ec2c1eee6 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -285,6 +285,50 @@ static int marvell_config_aneg(struct phy_device *phydev)
return 0;
}
+static int m88e1111_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+	/* The Marvell PHY has an erratum that requires certain
+	 * registers to be written in order to restart
+	 * autonegotiation.
+	 */
+	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+	if (err < 0)
+		return err;
+
+ err = marvell_set_polarity(phydev, phydev->mdix);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_M1111_PHY_LED_CONTROL,
+ MII_M1111_PHY_LED_DIRECT);
+ if (err < 0)
+ return err;
+
+ err = genphy_config_aneg(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg != AUTONEG_ENABLE) {
+ int bmcr;
+
+		/* A write to the speed/duplex bits (performed by the
+		 * genphy_config_aneg() call above) must be followed by
+		 * a software reset. Otherwise, the write has no effect.
+		 */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ err = phy_write(phydev, MII_BMCR, bmcr | BMCR_RESET);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_OF_MDIO
/*
* Set and/or override some configuration registers based on the
@@ -407,15 +449,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
-
- phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
- phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
- phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
-
- err = genphy_config_aneg(phydev);
-
- return err;
+ return genphy_config_aneg(phydev);
}
static int m88e1318_config_aneg(struct phy_device *phydev)
@@ -636,6 +670,28 @@ static int m88e1111_config_init(struct phy_device *phydev)
return phy_write(phydev, MII_BMCR, BMCR_RESET);
}
+static int m88e1121_config_init(struct phy_device *phydev)
+{
+ int err, oldpage;
+
+ oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+
+ err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+ if (err < 0)
+ return err;
+
+ /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+ err = phy_write(phydev, MII_88E1121_PHY_LED_CTRL,
+ MII_88E1121_PHY_LED_DEF);
+ if (err < 0)
+ return err;
+
+ phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
+
+ /* Set marvell,reg-init configuration from device tree */
+ return marvell_config_init(phydev);
+}
+
static int m88e1510_config_init(struct phy_device *phydev)
{
int err;
@@ -668,7 +724,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
return err;
}
- return marvell_config_init(phydev);
+ return m88e1121_config_init(phydev);
}
static int m88e1118_config_aneg(struct phy_device *phydev)
@@ -1161,7 +1217,7 @@ static struct phy_driver marvell_drivers[] = {
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
.config_init = &m88e1111_config_init,
- .config_aneg = &marvell_config_aneg,
+ .config_aneg = &m88e1111_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
@@ -1196,7 +1252,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1215,7 +1271,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
- .config_init = &marvell_config_init,
+ .config_init = &m88e1121_config_init,
.config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 308ade0eb..5c81d6faf 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -45,13 +45,7 @@ static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
struct mdio_mux_parent_bus *pb = cb->parent;
int r;
- /* In theory multiple mdio_mux could be stacked, thus creating
- * more than a single level of nesting. But in practice,
- * SINGLE_DEPTH_NESTING will cover the vast majority of use
- * cases. We use it, instead of trying to handle the general
- * case.
- */
- mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
@@ -76,7 +70,7 @@ static int mdio_mux_write(struct mii_bus *bus, int phy_id,
int r;
- mutex_lock_nested(&pb->mii_bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
if (r)
goto out;
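
Replacing SINGLE_DEPTH_NESTING with a named subclass gives lockdep distinct annotations for the legitimate ways the parent MDIO bus lock nests (under a mux versus under a nested read or write), instead of one catch-all depth. A sketch of the call pattern; the MDIO_MUTEX_* subclass enum itself lives in the kernel's MDIO headers:

/* Hedged sketch: a mux child taking its parent bus lock under the
 * dedicated lockdep subclass before forwarding the access. */
static int example_mux_read(struct mii_bus *parent, int addr, int regnum)
{
	int ret;

	mutex_lock_nested(&parent->mdio_lock, MDIO_MUTEX_MUX);
	ret = parent->read(parent, addr, regnum);
	mutex_unlock(&parent->mdio_lock);
	return ret;
}
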
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 0cba64f1e..09deef4be 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -333,7 +333,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner)
struct phy_device *phydev;
phydev = mdiobus_scan(bus, i);
- if (IS_ERR(phydev)) {
+ if (IS_ERR(phydev) && (PTR_ERR(phydev) != -ENODEV)) {
err = PTR_ERR(phydev);
goto error;
}
@@ -419,7 +419,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
int err;
phydev = get_phy_device(bus, addr, false);
- if (IS_ERR(phydev) || phydev == NULL)
+ if (IS_ERR(phydev))
return phydev;
/*
@@ -431,7 +431,7 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr)
err = phy_device_register(phydev);
if (err) {
phy_device_free(phydev);
- return NULL;
+ return ERR_PTR(-ENODEV);
}
return phydev;
@@ -457,7 +457,7 @@ int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum)
BUG_ON(in_interrupt());
- mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
retval = bus->read(bus, addr, regnum);
mutex_unlock(&bus->mdio_lock);
@@ -509,7 +509,7 @@ int mdiobus_write_nested(struct mii_bus *bus, int addr, u32 regnum, u16 val)
BUG_ON(in_interrupt());
- mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
+ mutex_lock_nested(&bus->mdio_lock, MDIO_MUTEX_NESTED);
err = bus->write(bus, addr, regnum, val);
mutex_unlock(&bus->mdio_lock);
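
With get_phy_device() now returning ERR_PTR(-ENODEV) instead of NULL, callers of mdiobus_scan() can distinguish "nothing at this address" from a real failure using only the IS_ERR() helpers, as __mdiobus_register() does above. The calling convention in isolation:

/* Sketch: consuming the ERR_PTR convention. -ENODEV means the
 * address is simply empty and the scan continues; anything else
 * is a real error. */
static int example_scan_one(struct mii_bus *bus, int addr)
{
	struct phy_device *phydev = mdiobus_scan(bus, addr);

	if (IS_ERR(phydev)) {
		if (PTR_ERR(phydev) == -ENODEV)
			return 0;	/* empty address, keep going */
		return PTR_ERR(phydev);
	}
	return 0;
}
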
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 4516c8a4f..5a8fefc25 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -726,7 +726,7 @@ static int kszphy_probe(struct phy_device *phydev)
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KS8737",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -781,7 +781,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ8041,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
@@ -800,7 +800,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ8041RNLI,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8041RNLI",
.features = PHY_BASIC_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
@@ -819,7 +819,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ8051,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8051",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
@@ -857,7 +857,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ8081,
.name = "Micrel KSZ8081 or KSZ8091",
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8081_type,
@@ -875,7 +875,7 @@ static struct phy_driver ksphy_driver[] = {
}, {
.phy_id = PHY_ID_KSZ8061,
.name = "Micrel KSZ8061",
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
@@ -909,7 +909,7 @@ static struct phy_driver ksphy_driver[] = {
.write_mmd_indirect = ksz9021_wr_mmd_phyreg,
}, {
.phy_id = PHY_ID_KSZ9031,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ9031 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -926,7 +926,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ8873MLL,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8873MLL Switch",
.features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG,
@@ -940,7 +940,7 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
}, {
.phy_id = PHY_ID_KSZ886X,
- .phy_id_mask = 0x00fffff0,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ886X Switch",
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -962,17 +962,17 @@ MODULE_LICENSE("GPL");
static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ9021, 0x000ffffe },
- { PHY_ID_KSZ9031, 0x00fffff0 },
+ { PHY_ID_KSZ9031, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8001, 0x00ffffff },
- { PHY_ID_KS8737, 0x00fffff0 },
+ { PHY_ID_KS8737, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8021, 0x00ffffff },
{ PHY_ID_KSZ8031, 0x00ffffff },
- { PHY_ID_KSZ8041, 0x00fffff0 },
- { PHY_ID_KSZ8051, 0x00fffff0 },
- { PHY_ID_KSZ8061, 0x00fffff0 },
- { PHY_ID_KSZ8081, 0x00fffff0 },
- { PHY_ID_KSZ8873MLL, 0x00fffff0 },
- { PHY_ID_KSZ886X, 0x00fffff0 },
+ { PHY_ID_KSZ8041, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ8051, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ8061, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 445fc5aef..c5dc2c363 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -362,6 +362,60 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd)
}
EXPORT_SYMBOL(phy_ethtool_sset);
+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ u8 autoneg = cmd->base.autoneg;
+ u8 duplex = cmd->base.duplex;
+ u32 speed = cmd->base.speed;
+ u32 advertising;
+
+ if (cmd->base.phy_address != phydev->mdio.addr)
+ return -EINVAL;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+	/* Make sure that we don't pass unsupported values into the PHY */
+ advertising &= phydev->supported;
+
+ /* Verify the settings we care about. */
+ if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (autoneg == AUTONEG_ENABLE && advertising == 0)
+ return -EINVAL;
+
+ if (autoneg == AUTONEG_DISABLE &&
+ ((speed != SPEED_1000 &&
+ speed != SPEED_100 &&
+ speed != SPEED_10) ||
+ (duplex != DUPLEX_HALF &&
+ duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ phydev->autoneg = autoneg;
+
+ phydev->speed = speed;
+
+ phydev->advertising = advertising;
+
+ if (autoneg == AUTONEG_ENABLE)
+ phydev->advertising |= ADVERTISED_Autoneg;
+ else
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+
+ phydev->duplex = duplex;
+
+ phydev->mdix = cmd->base.eth_tp_mdix_ctrl;
+
+ /* Restart the PHY */
+ phy_start_aneg(phydev);
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_set);
+
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
{
cmd->supported = phydev->supported;
@@ -385,6 +439,33 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
}
EXPORT_SYMBOL(phy_ethtool_gset);
+int phy_ethtool_ksettings_get(struct phy_device *phydev,
+ struct ethtool_link_ksettings *cmd)
+{
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ phydev->supported);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ phydev->advertising);
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ phydev->lp_advertising);
+
+ cmd->base.speed = phydev->speed;
+ cmd->base.duplex = phydev->duplex;
+ if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
+ cmd->base.port = PORT_BNC;
+ else
+ cmd->base.port = PORT_MII;
+
+ cmd->base.phy_address = phydev->mdio.addr;
+ cmd->base.autoneg = phydev->autoneg;
+ cmd->base.eth_tp_mdix_ctrl = phydev->mdix;
+
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_get);
+
/**
* phy_mii_ioctl - generic PHY MII ioctl interface
* @phydev: the phy_device struct
@@ -1268,3 +1349,27 @@ void phy_ethtool_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol)
phydev->drv->get_wol(phydev, wol);
}
EXPORT_SYMBOL(phy_ethtool_get_wol);
+
+int phy_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_ksettings_get(phydev, cmd);
+}
+EXPORT_SYMBOL(phy_ethtool_get_link_ksettings);
+
+int phy_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct phy_device *phydev = ndev->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_ksettings_set(phydev, cmd);
+}
+EXPORT_SYMBOL(phy_ethtool_set_link_ksettings);
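
The two wrappers exported above exist so MAC drivers can implement the new ethtool link_ksettings operations without duplicating the phydev plumbing; a driver that defers fully to phylib needs only:

/* Sketch: delegating the ksettings ethtool ops to phylib
 * (ops struct name hypothetical). */
static const struct ethtool_ops mydrv_ethtool_ops = {
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
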
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e551f3a89..e977ba931 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -529,7 +529,7 @@ struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
/* If the phy_id is mostly Fs, there is no device there */
if ((phy_id & 0x1fffffff) == 0x1fffffff)
- return NULL;
+ return ERR_PTR(-ENODEV);
return phy_device_create(bus, addr, phy_id, is_c45, &c45_ids);
}
@@ -1123,8 +1123,9 @@ static int genphy_config_advert(struct phy_device *phydev)
*/
int genphy_setup_forced(struct phy_device *phydev)
{
- int ctl = 0;
+ int ctl = phy_read(phydev, MII_BMCR);
+ ctl &= BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN;
phydev->pause = 0;
phydev->asym_pause = 0;
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 2e21e9366..b62c4aaee 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -75,22 +75,13 @@ static int smsc_phy_reset(struct phy_device *phydev)
* in all capable mode before using it.
*/
if ((rc & MII_LAN83C185_MODE_MASK) == MII_LAN83C185_MODE_POWERDOWN) {
- int timeout = 50000;
-
- /* set "all capable" mode and reset the phy */
+ /* set "all capable" mode */
rc |= MII_LAN83C185_MODE_ALL;
phy_write(phydev, MII_LAN83C185_SPECIAL_MODES, rc);
- phy_write(phydev, MII_BMCR, BMCR_RESET);
-
- /* wait end of reset (max 500 ms) */
- do {
- udelay(10);
- if (timeout-- == 0)
- return -1;
- rc = phy_read(phydev, MII_BMCR);
- } while (rc & BMCR_RESET);
}
- return 0;
+
+ /* reset the phy */
+ return genphy_soft_reset(phydev);
}
static int lan911x_config_init(struct phy_device *phydev)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index f572b31a2..a30ee427e 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -46,6 +46,7 @@
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
+#include <linux/file.h>
#include <asm/unaligned.h>
#include <net/slhc_vj.h>
#include <linux/atomic.h>
@@ -183,6 +184,12 @@ struct channel {
#endif /* CONFIG_PPP_MULTILINK */
};
+struct ppp_config {
+ struct file *file;
+ s32 unit;
+ bool ifname_is_set;
+};
+
/*
* SMP locking issues:
* Both the ppp.rlock and ppp.wlock locks protect the ppp.channels
@@ -269,8 +276,7 @@ static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
static void ppp_ccp_closed(struct ppp *ppp);
static struct compressor *find_compressor(int type);
static void ppp_get_stats(struct ppp *ppp, struct ppp_stats *st);
-static struct ppp *ppp_create_interface(struct net *net, int unit,
- struct file *file, int *retp);
+static int ppp_create_interface(struct net *net, struct file *file, int *unit);
static void init_ppp_file(struct ppp_file *pf, int kind);
static void ppp_destroy_interface(struct ppp *ppp);
static struct ppp *ppp_find_unit(struct ppp_net *pn, int unit);
@@ -282,6 +288,7 @@ static int unit_get(struct idr *p, void *ptr);
static int unit_set(struct idr *p, void *ptr, int n);
static void unit_put(struct idr *p, int n);
static void *unit_find(struct idr *p, int n);
+static void ppp_setup(struct net_device *dev);
static const struct net_device_ops ppp_netdev_ops;
@@ -853,12 +860,12 @@ static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
/* Create a new ppp unit */
if (get_user(unit, p))
break;
- ppp = ppp_create_interface(net, unit, file, &err);
- if (!ppp)
+ err = ppp_create_interface(net, file, &unit);
+ if (err < 0)
break;
- file->private_data = &ppp->file;
+
err = -EFAULT;
- if (put_user(ppp->file.index, p))
+ if (put_user(unit, p))
break;
err = 0;
break;
@@ -960,6 +967,188 @@ static struct pernet_operations ppp_net_ops = {
.size = sizeof(struct ppp_net),
};
+static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
+{
+ struct ppp_net *pn = ppp_pernet(ppp->ppp_net);
+ int ret;
+
+ mutex_lock(&pn->all_ppp_mutex);
+
+ if (unit < 0) {
+ ret = unit_get(&pn->units_idr, ppp);
+ if (ret < 0)
+ goto err;
+ } else {
+ /* Caller asked for a specific unit number. Fail with -EEXIST
+ * if unavailable. For backward compatibility, return -EEXIST
+ * too if idr allocation fails; this makes pppd retry without
+ * requesting a specific unit number.
+ */
+ if (unit_find(&pn->units_idr, unit)) {
+ ret = -EEXIST;
+ goto err;
+ }
+ ret = unit_set(&pn->units_idr, ppp, unit);
+ if (ret < 0) {
+ /* Rewrite error for backward compatibility */
+ ret = -EEXIST;
+ goto err;
+ }
+ }
+ ppp->file.index = ret;
+
+ if (!ifname_is_set)
+ snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ppp->file.index);
+
+ ret = register_netdevice(ppp->dev);
+ if (ret < 0)
+ goto err_unit;
+
+ atomic_inc(&ppp_unit_count);
+
+ mutex_unlock(&pn->all_ppp_mutex);
+
+ return 0;
+
+err_unit:
+ unit_put(&pn->units_idr, ppp->file.index);
+err:
+ mutex_unlock(&pn->all_ppp_mutex);
+
+ return ret;
+}
+
+static int ppp_dev_configure(struct net *src_net, struct net_device *dev,
+ const struct ppp_config *conf)
+{
+ struct ppp *ppp = netdev_priv(dev);
+ int indx;
+ int err;
+
+ ppp->dev = dev;
+ ppp->ppp_net = src_net;
+ ppp->mru = PPP_MRU;
+ ppp->owner = conf->file;
+
+ init_ppp_file(&ppp->file, INTERFACE);
+ ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
+
+ for (indx = 0; indx < NUM_NP; ++indx)
+ ppp->npmode[indx] = NPMODE_PASS;
+ INIT_LIST_HEAD(&ppp->channels);
+ spin_lock_init(&ppp->rlock);
+ spin_lock_init(&ppp->wlock);
+#ifdef CONFIG_PPP_MULTILINK
+ ppp->minseq = -1;
+ skb_queue_head_init(&ppp->mrq);
+#endif /* CONFIG_PPP_MULTILINK */
+#ifdef CONFIG_PPP_FILTER
+ ppp->pass_filter = NULL;
+ ppp->active_filter = NULL;
+#endif /* CONFIG_PPP_FILTER */
+
+ err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set);
+ if (err < 0)
+ return err;
+
+ conf->file->private_data = &ppp->file;
+
+ return 0;
+}
+
+static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = {
+ [IFLA_PPP_DEV_FD] = { .type = NLA_S32 },
+};
+
+static int ppp_nl_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (!data)
+ return -EINVAL;
+
+ if (!data[IFLA_PPP_DEV_FD])
+ return -EINVAL;
+ if (nla_get_s32(data[IFLA_PPP_DEV_FD]) < 0)
+ return -EBADF;
+
+ return 0;
+}
+
+static int ppp_nl_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct ppp_config conf = {
+ .unit = -1,
+ .ifname_is_set = true,
+ };
+ struct file *file;
+ int err;
+
+ file = fget(nla_get_s32(data[IFLA_PPP_DEV_FD]));
+ if (!file)
+ return -EBADF;
+
+ /* rtnl_lock is already held here, but ppp_create_interface() locks
+ * ppp_mutex before holding rtnl_lock. Using mutex_trylock() avoids
+ * possible deadlock due to lock order inversion, at the cost of
+ * pushing the problem back to userspace.
+ */
+ if (!mutex_trylock(&ppp_mutex)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (file->f_op != &ppp_device_fops || file->private_data) {
+ err = -EBADF;
+ goto out_unlock;
+ }
+
+ conf.file = file;
+ err = ppp_dev_configure(src_net, dev, &conf);
+
+out_unlock:
+ mutex_unlock(&ppp_mutex);
+out:
+ fput(file);
+
+ return err;
+}
+
+static void ppp_nl_dellink(struct net_device *dev, struct list_head *head)
+{
+ unregister_netdevice_queue(dev, head);
+}
+
+static size_t ppp_nl_get_size(const struct net_device *dev)
+{
+ return 0;
+}
+
+static int ppp_nl_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ return 0;
+}
+
+static struct net *ppp_nl_get_link_net(const struct net_device *dev)
+{
+ struct ppp *ppp = netdev_priv(dev);
+
+ return ppp->ppp_net;
+}
+
+static struct rtnl_link_ops ppp_link_ops __read_mostly = {
+ .kind = "ppp",
+ .maxtype = IFLA_PPP_MAX,
+ .policy = ppp_nl_policy,
+ .priv_size = sizeof(struct ppp),
+ .setup = ppp_setup,
+ .validate = ppp_nl_validate,
+ .newlink = ppp_nl_newlink,
+ .dellink = ppp_nl_dellink,
+ .get_size = ppp_nl_get_size,
+ .fill_info = ppp_nl_fill_info,
+ .get_link_net = ppp_nl_get_link_net,
+};
+
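
Once ppp_link_ops is registered, a PPP unit can also be created over rtnetlink: userspace opens /dev/ppp and passes the descriptor as IFLA_PPP_DEV_FD inside IFLA_INFO_DATA (the attribute is defined in the uapi if_link.h by this series). A hypothetical userspace sketch using libmnl, not part of this patch, with error handling kept minimal:

#include <fcntl.h>
#include <libmnl/libmnl.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>

static int create_ppp_link(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct ifinfomsg *ifm;
	struct nlattr *linkinfo, *data;
	int fd = open("/dev/ppp", O_RDWR);

	if (fd < 0)
		return -1;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = RTM_NEWLINK;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
	ifm->ifi_family = AF_UNSPEC;

	linkinfo = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
	mnl_attr_put_strz(nlh, IFLA_INFO_KIND, "ppp");
	data = mnl_attr_nest_start(nlh, IFLA_INFO_DATA);
	mnl_attr_put_u32(nlh, IFLA_PPP_DEV_FD, fd);	/* s32 on the wire */
	mnl_attr_nest_end(nlh, data);
	mnl_attr_nest_end(nlh, linkinfo);

	nl = mnl_socket_open(NETLINK_ROUTE);
	if (!nl)
		return -1;
	mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID);
	mnl_socket_sendto(nl, nlh, nlh->nlmsg_len);
	mnl_socket_close(nl);

	return fd;	/* keep this fd open while the unit is in use */
}
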
#define PPP_MAJOR 108
/* Called at boot time if ppp is compiled into the kernel,
@@ -988,11 +1177,19 @@ static int __init ppp_init(void)
goto out_chrdev;
}
+ err = rtnl_link_register(&ppp_link_ops);
+ if (err) {
+ pr_err("failed to register rtnetlink PPP handler\n");
+ goto out_class;
+ }
+
/* not a big deal if we fail here :-) */
device_create(ppp_class, NULL, MKDEV(PPP_MAJOR, 0), NULL, "ppp");
return 0;
+out_class:
+ class_destroy(ppp_class);
out_chrdev:
unregister_chrdev(PPP_MAJOR, "ppp");
out_net:
@@ -2404,8 +2601,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
- put_net(pch->chan_net);
- pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
@@ -2732,102 +2927,42 @@ ppp_get_stats(struct ppp *ppp, struct ppp_stats *st)
* or if there is already a unit with the requested number.
* unit == -1 means allocate a new number.
*/
-static struct ppp *ppp_create_interface(struct net *net, int unit,
- struct file *file, int *retp)
+static int ppp_create_interface(struct net *net, struct file *file, int *unit)
{
+ struct ppp_config conf = {
+ .file = file,
+ .unit = *unit,
+ .ifname_is_set = false,
+ };
+ struct net_device *dev;
struct ppp *ppp;
- struct ppp_net *pn;
- struct net_device *dev = NULL;
- int ret = -ENOMEM;
- int i;
+ int err;
dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_ENUM, ppp_setup);
- if (!dev)
- goto out1;
-
- pn = ppp_pernet(net);
-
- ppp = netdev_priv(dev);
- ppp->dev = dev;
- ppp->mru = PPP_MRU;
- init_ppp_file(&ppp->file, INTERFACE);
- ppp->file.hdrlen = PPP_HDRLEN - 2; /* don't count proto bytes */
- ppp->owner = file;
- for (i = 0; i < NUM_NP; ++i)
- ppp->npmode[i] = NPMODE_PASS;
- INIT_LIST_HEAD(&ppp->channels);
- spin_lock_init(&ppp->rlock);
- spin_lock_init(&ppp->wlock);
-#ifdef CONFIG_PPP_MULTILINK
- ppp->minseq = -1;
- skb_queue_head_init(&ppp->mrq);
-#endif /* CONFIG_PPP_MULTILINK */
-#ifdef CONFIG_PPP_FILTER
- ppp->pass_filter = NULL;
- ppp->active_filter = NULL;
-#endif /* CONFIG_PPP_FILTER */
-
- /*
- * drum roll: don't forget to set
- * the net device is belong to
- */
+ if (!dev) {
+ err = -ENOMEM;
+ goto err;
+ }
dev_net_set(dev, net);
+ dev->rtnl_link_ops = &ppp_link_ops;
rtnl_lock();
- mutex_lock(&pn->all_ppp_mutex);
- if (unit < 0) {
- unit = unit_get(&pn->units_idr, ppp);
- if (unit < 0) {
- ret = unit;
- goto out2;
- }
- } else {
- ret = -EEXIST;
- if (unit_find(&pn->units_idr, unit))
- goto out2; /* unit already exists */
- /*
- * if caller need a specified unit number
- * lets try to satisfy him, otherwise --
- * he should better ask us for new unit number
- *
- * NOTE: yes I know that returning EEXIST it's not
- * fair but at least pppd will ask us to allocate
- * new unit in this case so user is happy :)
- */
- unit = unit_set(&pn->units_idr, ppp, unit);
- if (unit < 0)
- goto out2;
- }
-
- /* Initialize the new ppp unit */
- ppp->file.index = unit;
- sprintf(dev->name, "ppp%d", unit);
-
- ret = register_netdevice(dev);
- if (ret != 0) {
- unit_put(&pn->units_idr, unit);
- netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
- dev->name, ret);
- goto out2;
- }
-
- ppp->ppp_net = net;
+ err = ppp_dev_configure(net, dev, &conf);
+ if (err < 0)
+ goto err_dev;
+ ppp = netdev_priv(dev);
+ *unit = ppp->file.index;
- atomic_inc(&ppp_unit_count);
- mutex_unlock(&pn->all_ppp_mutex);
rtnl_unlock();
- *retp = 0;
- return ppp;
+ return 0;
-out2:
- mutex_unlock(&pn->all_ppp_mutex);
+err_dev:
rtnl_unlock();
free_netdev(dev);
-out1:
- *retp = ret;
- return NULL;
+err:
+ return err;
}
/*
@@ -2999,6 +3134,9 @@ ppp_disconnect_channel(struct channel *pch)
*/
static void ppp_destroy_channel(struct channel *pch)
{
+ put_net(pch->chan_net);
+ pch->chan_net = NULL;
+
atomic_dec(&channel_count);
if (!pch->file.dead) {
@@ -3016,6 +3154,7 @@ static void __exit ppp_cleanup(void)
/* should never happen */
if (atomic_read(&ppp_unit_count) || atomic_read(&channel_count))
pr_err("PPP: removing module but units remain!\n");
+ rtnl_link_unregister(&ppp_link_ops);
unregister_chrdev(PPP_MAJOR, "ppp");
device_destroy(ppp_class, MKDEV(PPP_MAJOR, 0));
class_destroy(ppp_class);
@@ -3074,4 +3213,5 @@ EXPORT_SYMBOL(ppp_register_compressor);
EXPORT_SYMBOL(ppp_unregister_compressor);
MODULE_LICENSE("GPL");
MODULE_ALIAS_CHARDEV(PPP_MAJOR, 0);
+MODULE_ALIAS_RTNL_LINK("ppp");
MODULE_ALIAS("devname:ppp");
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 9cfe6aeac..a31f4610b 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -179,11 +179,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
unsigned long flags;
int add_num = 1;
- local_irq_save(flags);
- if (!spin_trylock(&rnet->tx_lock)) {
- local_irq_restore(flags);
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&rnet->tx_lock, flags);
if (is_multicast_ether_addr(eth->h_dest))
add_num = nets[rnet->mport->id].nact;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index a17d86a57..9ed6d1c1e 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -407,7 +407,7 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
#ifdef SL_CHECK_TRANSMIT
- sl->dev->trans_start = jiffies;
+ netif_trans_update(sl->dev);
#endif
sl->xleft = count - actual;
sl->xhead = sl->xbuff + actual;
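
This and many later hunks in the diff replace direct stores to dev->trans_start with netif_trans_update(): the transmit timestamp moved into the per-queue structure, and the helper updates queue 0's copy for the tx-watchdog. A sketch of the 4.7-era helper; the authoritative definition lives in include/linux/netdevice.h:

/* Sketch of the helper these call sites switch to. */
static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}
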
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 2ace12653..fdee77207 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1203,8 +1203,10 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_dev_open;
}
+ netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
+ netif_addr_unlock_bh(dev);
err = vlan_vids_add_by_dev(port_dev, dev);
if (err) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index dda490542..e16487cc6 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -131,6 +131,17 @@ struct tap_filter {
#define TUN_FLOW_EXPIRE (3 * HZ)
+struct tun_pcpu_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_dropped;
+ u32 tx_dropped;
+ u32 rx_frame_errors;
+};
+
/* A tun_file connects an open character device to a tuntap netdevice. It
* also contains all socket related structures (except sock_fprog and tap_filter)
* to serve as one transmit queue for tuntap device. The sock_fprog and
@@ -205,6 +216,7 @@ struct tun_struct {
struct list_head disabled;
void *security;
u32 flow_count;
+ struct tun_pcpu_stats __percpu *pcpu_stats;
};
#ifdef CONFIG_TUN_VNET_CROSS_LE
@@ -624,8 +636,9 @@ static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filte
/* Re-attach the filter to persist device */
if (!skip_filter && (tun->filter_attached == true)) {
- err = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
- lockdep_rtnl_is_held());
+ lock_sock(tfile->socket.sk);
+ err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ release_sock(tfile->socket.sk);
if (!err)
goto out;
}
@@ -823,7 +836,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (txq >= numqueues)
goto drop;
- if (numqueues == 1) {
+#ifdef CONFIG_RPS
+ if (numqueues == 1 && static_key_false(&rps_needed)) {
/* Select queue was not called for the skbuff, so we extract the
* RPS hash and save it into the flow_table here.
*/
@@ -838,6 +852,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
tun_flow_save_rps_rxhash(e, rxhash);
}
}
+#endif
tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
@@ -864,7 +879,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
if (skb->sk && sk_fullsock(skb->sk)) {
- sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
+ sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags,
+ &skb_shinfo(skb)->tx_flags);
sw_tx_timestamp(skb);
}
@@ -887,7 +903,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
drop:
- dev->stats.tx_dropped++;
+ this_cpu_inc(tun->pcpu_stats->tx_dropped);
skb_tx_error(skb);
kfree_skb(skb);
rcu_read_unlock();
@@ -950,6 +966,43 @@ static void tun_set_headroom(struct net_device *dev, int new_hr)
tun->align = new_hr;
}
+static struct rtnl_link_stats64 *
+tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
+ struct tun_struct *tun = netdev_priv(dev);
+ struct tun_pcpu_stats *p;
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 rxpackets, rxbytes, txpackets, txbytes;
+ unsigned int start;
+
+ p = per_cpu_ptr(tun->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ rxpackets = p->rx_packets;
+ rxbytes = p->rx_bytes;
+ txpackets = p->tx_packets;
+ txbytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ stats->rx_packets += rxpackets;
+ stats->rx_bytes += rxbytes;
+ stats->tx_packets += txpackets;
+ stats->tx_bytes += txbytes;
+
+ /* u32 counters */
+ rx_dropped += p->rx_dropped;
+ rx_frame_errors += p->rx_frame_errors;
+ tx_dropped += p->tx_dropped;
+ }
+ stats->rx_dropped = rx_dropped;
+ stats->rx_frame_errors = rx_frame_errors;
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
+
static const struct net_device_ops tun_netdev_ops = {
.ndo_uninit = tun_net_uninit,
.ndo_open = tun_net_open,
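
tun_net_get_stats64() above is the reader half of the u64_stats_sync scheme: the retry loop re-reads a per-CPU snapshot whenever a writer raced with it, which is how 32-bit kernels get torn-free 64-bit counters without atomics. The matching writer pattern, the same sequence tun_get_user() and tun_put_user() adopt later in this diff, pulled out here as a self-contained sketch:

/* Writer side: u64 counters are bumped inside the begin/end
 * markers; the u32 drop counters need only this_cpu_inc() since
 * they cannot tear. */
static void tun_count_rx(struct tun_struct *tun, unsigned int len)
{
	struct tun_pcpu_stats *stats = get_cpu_ptr(tun->pcpu_stats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
	put_cpu_ptr(stats);
}
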
@@ -962,6 +1015,7 @@ static const struct net_device_ops tun_netdev_ops = {
.ndo_poll_controller = tun_poll_controller,
#endif
.ndo_set_rx_headroom = tun_set_headroom,
+ .ndo_get_stats64 = tun_net_get_stats64,
};
static const struct net_device_ops tap_netdev_ops = {
@@ -980,6 +1034,7 @@ static const struct net_device_ops tap_netdev_ops = {
#endif
.ndo_features_check = passthru_features_check,
.ndo_set_rx_headroom = tun_set_headroom,
+ .ndo_get_stats64 = tun_net_get_stats64,
};
static void tun_flow_init(struct tun_struct *tun)
@@ -1104,6 +1159,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
size_t total_len = iov_iter_count(from);
size_t len = total_len, align = tun->align, linear;
struct virtio_net_hdr gso = { 0 };
+ struct tun_pcpu_stats *stats;
int good_linear;
int copylen;
bool zerocopy = false;
@@ -1178,7 +1234,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EAGAIN)
- tun->dev->stats.rx_dropped++;
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
return PTR_ERR(skb);
}
@@ -1193,7 +1249,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (err) {
- tun->dev->stats.rx_dropped++;
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
kfree_skb(skb);
return -EFAULT;
}
@@ -1201,7 +1257,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
if (!skb_partial_csum_set(skb, tun16_to_cpu(tun, gso.csum_start),
tun16_to_cpu(tun, gso.csum_offset))) {
- tun->dev->stats.rx_frame_errors++;
+ this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
kfree_skb(skb);
return -EINVAL;
}
@@ -1218,7 +1274,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
pi.proto = htons(ETH_P_IPV6);
break;
default:
- tun->dev->stats.rx_dropped++;
+ this_cpu_inc(tun->pcpu_stats->rx_dropped);
kfree_skb(skb);
return -EINVAL;
}
@@ -1246,7 +1302,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
break;
default:
- tun->dev->stats.rx_frame_errors++;
+ this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
kfree_skb(skb);
return -EINVAL;
}
@@ -1256,7 +1312,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_shinfo(skb)->gso_size = tun16_to_cpu(tun, gso.gso_size);
if (skb_shinfo(skb)->gso_size == 0) {
- tun->dev->stats.rx_frame_errors++;
+ this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
kfree_skb(skb);
return -EINVAL;
}
@@ -1279,8 +1335,12 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rxhash = skb_get_hash(skb);
netif_rx_ni(skb);
- tun->dev->stats.rx_packets++;
- tun->dev->stats.rx_bytes += len;
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
+ put_cpu_ptr(stats);
tun_flow_update(tun, rxhash, tfile);
return total_len;
@@ -1309,6 +1369,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
struct iov_iter *iter)
{
struct tun_pi pi = { 0, skb->protocol };
+ struct tun_pcpu_stats *stats;
ssize_t total;
int vlan_offset = 0;
int vlan_hlen = 0;
@@ -1409,8 +1470,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
done:
- tun->dev->stats.tx_packets++;
- tun->dev->stats.tx_bytes += skb->len + vlan_hlen;
+	/* caller is in process context */
+ stats = get_cpu_ptr(tun->pcpu_stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len + vlan_hlen;
+ u64_stats_update_end(&stats->syncp);
+ put_cpu_ptr(tun->pcpu_stats);
return total;
}
@@ -1465,6 +1531,7 @@ static void tun_free_netdev(struct net_device *dev)
struct tun_struct *tun = netdev_priv(dev);
BUG_ON(!(list_empty(&tun->disabled)));
+ free_percpu(tun->pcpu_stats);
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
free_netdev(dev);
@@ -1713,11 +1780,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->filter_attached = false;
tun->sndbuf = tfile->socket.sk->sk_sndbuf;
+ tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
+ if (!tun->pcpu_stats) {
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
+
spin_lock_init(&tun->lock);
err = security_tun_dev_alloc_security(&tun->security);
if (err < 0)
- goto err_free_dev;
+ goto err_free_stat;
tun_net_init(dev);
tun_flow_init(tun);
@@ -1725,7 +1798,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
- dev->features = dev->hw_features;
+ dev->features = dev->hw_features | NETIF_F_LLTX;
dev->vlan_features = dev->features &
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
@@ -1761,6 +1834,8 @@ err_detach:
err_free_flow:
tun_flow_uninit(tun);
security_tun_dev_free_security(tun->security);
+err_free_stat:
+ free_percpu(tun->pcpu_stats);
err_free_dev:
free_netdev(dev);
return err;
@@ -1823,7 +1898,9 @@ static void tun_detach_filter(struct tun_struct *tun, int n)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- __sk_detach_filter(tfile->socket.sk, lockdep_rtnl_is_held());
+ lock_sock(tfile->socket.sk);
+ sk_detach_filter(tfile->socket.sk);
+ release_sock(tfile->socket.sk);
}
tun->filter_attached = false;
@@ -1836,8 +1913,9 @@ static int tun_attach_filter(struct tun_struct *tun)
for (i = 0; i < tun->numqueues; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
- ret = __sk_attach_filter(&tun->fprog, tfile->socket.sk,
- lockdep_rtnl_is_held());
+ lock_sock(tfile->socket.sk);
+ ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
+ release_sock(tfile->socket.sk);
if (ret) {
tun_detach_filter(tun, i);
return ret;
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 4e2b26a88..d9ca05d3a 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -376,7 +376,7 @@ static int catc_tx_run(struct catc *catc)
catc->tx_idx = !catc->tx_idx;
catc->tx_ptr = 0;
- catc->netdev->trans_start = jiffies;
+ netif_trans_update(catc->netdev);
return status;
}
@@ -389,7 +389,7 @@ static void catc_tx_done(struct urb *urb)
if (status == -ECONNRESET) {
dev_dbg(&urb->dev->dev, "Tx Reset.\n");
urb->status = 0;
- catc->netdev->trans_start = jiffies;
+ netif_trans_update(catc->netdev);
catc->netdev->stats.tx_errors++;
clear_bit(TX_RUNNING, &catc->flags);
netif_wake_queue(catc->netdev);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index d4425c565..877c9516e 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -740,12 +740,14 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
- struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);
+ int maxmtu = cdc_ncm_max_dgram_size(dev) - cdc_ncm_eth_hlen(dev);
if (new_mtu <= 0 || new_mtu > maxmtu)
return -EINVAL;
+
net->mtu = new_mtu;
+ cdc_ncm_set_dgram_size(dev, new_mtu + cdc_ncm_eth_hlen(dev));
+
return 0;
}
EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index 5e151e6a3..8a40202c0 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -155,12 +155,11 @@ static int control_write(struct usbnet *dev, unsigned char request,
index, size);
if (data) {
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (!buf) {
err = -ENOMEM;
goto err_out;
}
- memcpy(buf, data, size);
}
err = usb_control_msg(dev->udev,
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 111d907e0..4b4458616 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2029,7 +2029,7 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
tty = tty_port_tty_get(&serial->port);
- if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+ if (tty && tty_throttled(tty)) {
tty_kref_put(tty);
return -1;
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index b0f65bcfa..52c72ed22 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -935,7 +935,7 @@ static void kaweth_tx_timeout(struct net_device *net)
dev_warn(&net->dev, "%s: Tx timed out. Resetting.\n", net->name);
kaweth->stats.tx_errors++;
- net->trans_start = jiffies;
+ netif_trans_update(net);
usb_unlink_urb(kaweth->tx_urb);
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index f64778ad9..6a9d474b0 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -3045,7 +3045,7 @@ gso_skb:
ret = usb_submit_urb(urb, GFP_ATOMIC);
switch (ret) {
case 0:
- dev->net->trans_start = jiffies;
+ netif_trans_update(dev->net);
lan78xx_queue_skb(&dev->txq, skb, tx_start);
if (skb_queue_len(&dev->txq) >= dev->tx_qlen)
netif_stop_queue(dev->net);
@@ -3729,7 +3729,7 @@ int lan78xx_resume(struct usb_interface *intf)
usb_free_urb(res);
usb_autopm_put_interface_async(dev->intf);
} else {
- dev->net->trans_start = jiffies;
+ netif_trans_update(dev->net);
lan78xx_queue_skb(&dev->txq, skb, tx_start);
}
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 82129eef7..9bbe0161a 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -473,7 +473,7 @@ static void read_bulk_callback(struct urb *urb)
goto goon;
}
- if (!count || count < 4)
+ if (count < 4)
goto goon;
rx_status = buf[count - 2];
@@ -615,7 +615,7 @@ static void write_bulk_callback(struct urb *urb)
break;
}
- net->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(net); /* prevent tx timeout */
netif_wake_queue(net);
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d1f78c2c9..e9654a685 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -26,12 +26,13 @@
#include <linux/mdio.h>
#include <linux/usb/cdc.h>
#include <linux/suspend.h>
+#include <linux/acpi.h>
/* Information for net-next */
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "3"
+#define NET_VERSION "5"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -116,6 +117,7 @@
#define USB_TX_DMA 0xd434
#define USB_TOLERANCE 0xd490
#define USB_LPM_CTRL 0xd41a
+#define USB_BMU_RESET 0xd4b0
#define USB_UPS_CTRL 0xd800
#define USB_MISC_0 0xd81a
#define USB_POWER_CUT 0xd80a
@@ -338,6 +340,10 @@
#define TEST_MODE_DISABLE 0x00000001
#define TX_SIZE_ADJUST1 0x00000100
+/* USB_BMU_RESET */
+#define BMU_RESET_EP_IN 0x01
+#define BMU_RESET_EP_OUT 0x02
+
/* USB_UPS_CTRL */
#define POWER_CUT 0x0100
@@ -455,6 +461,11 @@
/* SRAM_IMPEDANCE */
#define RX_DRIVING_MASK 0x6000
+/* MAC PASSTHRU */
+#define AD_MASK 0xfee0
+#define EFUSE 0xcfdb
+#define PASS_THRU_MASK 0x1
+
enum rtl_register_content {
_1000bps = 0x10,
_100bps = 0x08,
@@ -619,6 +630,7 @@ struct r8152 {
int (*eee_get)(struct r8152 *, struct ethtool_eee *);
int (*eee_set)(struct r8152 *, struct ethtool_eee *);
bool (*in_nway)(struct r8152 *);
+ void (*autosuspend_en)(struct r8152 *tp, bool enable);
} rtl_ops;
int intr_interval;
@@ -1030,6 +1042,65 @@ out1:
return ret;
}
+/* Devices containing the RTL8153-AD can support a persistent
+ * host-system-provided MAC address.
+ * Examples of this are the Dell TB15 and Dell WD15 docks.
+ */
+static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
+{
+ acpi_status status;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ int ret = -EINVAL;
+ u32 ocp_data;
+ unsigned char buf[6];
+
+ /* test for -AD variant of RTL8153 */
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
+ if ((ocp_data & AD_MASK) != 0x1000)
+ return -ENODEV;
+
+ /* test for MAC address pass-through bit */
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, EFUSE);
+ if ((ocp_data & PASS_THRU_MASK) != 1)
+ return -ENODEV;
+
+ /* returns _AUXMAC_#AABBCCDDEEFF# */
+ status = acpi_evaluate_object(NULL, "\\_SB.AMAC", NULL, &buffer);
+ obj = (union acpi_object *)buffer.pointer;
+ if (!ACPI_SUCCESS(status))
+ return -ENODEV;
+ if (obj->type != ACPI_TYPE_BUFFER || obj->string.length != 0x17) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid buffer when reading pass-thru MAC addr: "
+ "(%d, %d)\n",
+ obj->type, obj->string.length);
+ goto amacout;
+ }
+ if (strncmp(obj->string.pointer, "_AUXMAC_#", 9) != 0 ||
+ strncmp(obj->string.pointer + 0x15, "#", 1) != 0) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid header when reading pass-thru MAC addr\n");
+ goto amacout;
+ }
+ ret = hex2bin(buf, obj->string.pointer + 9, 6);
+ if (!(ret == 0 && is_valid_ether_addr(buf))) {
+ netif_warn(tp, probe, tp->netdev,
+ "Invalid MAC when reading pass-thru MAC addr: "
+ "%d, %pM\n", ret, buf);
+ ret = -EINVAL;
+ goto amacout;
+ }
+ memcpy(sa->sa_data, buf, 6);
+ ether_addr_copy(tp->netdev->dev_addr, sa->sa_data);
+ netif_info(tp, probe, tp->netdev,
+ "Using pass-thru MAC addr %pM\n", sa->sa_data);
+
+amacout:
+ kfree(obj);
+ return ret;
+}
+
static int set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
@@ -1038,8 +1109,15 @@ static int set_ethernet_addr(struct r8152 *tp)
if (tp->version == RTL_VER_01)
ret = pla_ocp_read(tp, PLA_IDR, 8, sa.sa_data);
- else
- ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ else {
+		/* If this is not an RTL8153-AD, the eFuse MAC pass-through
+		 * bit is unset, or the system provides no valid _SB.AMAC,
+		 * this read is expected to fail; fall back to PLA_BACKUP.
+		 */
+ ret = vendor_mac_passthru_addr_read(tp, &sa);
+ if (ret < 0)
+ ret = pla_ocp_read(tp, PLA_BACKUP, 8, sa.sa_data);
+ }
if (ret < 0) {
netif_err(tp, probe, dev, "Get ether addr fail\n");
@@ -2169,7 +2247,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp)
static void r8153_set_rx_early_size(struct r8152 *tp)
{
u32 mtu = tp->netdev->mtu;
- u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 4;
+ u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8;
ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data);
}
@@ -2290,10 +2368,6 @@ static u32 __rtl_get_wol(struct r8152 *tp)
u32 ocp_data;
u32 wolopts = 0;
- ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
- if (!(ocp_data & LAN_WAKE_EN))
- return 0;
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
if (ocp_data & LINK_ON_WAKE_EN)
wolopts |= WAKE_PHY;
@@ -2326,15 +2400,13 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
- ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+ ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN);
if (wolopts & WAKE_UCAST)
ocp_data |= UWF_EN;
if (wolopts & WAKE_BCAST)
ocp_data |= BWF_EN;
if (wolopts & WAKE_MCAST)
ocp_data |= MWF_EN;
- if (wolopts & WAKE_ANY)
- ocp_data |= LAN_WAKE_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
@@ -2403,9 +2475,6 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
if (enable) {
u32 ocp_data;
- r8153_u1u2en(tp, false);
- r8153_u2p3en(tp, false);
-
__rtl_set_wol(tp, WAKE_ANY);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2416,7 +2485,28 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
} else {
+ u32 ocp_data;
+
__rtl_set_wol(tp, tp->saved_wolopts);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data &= ~LINK_OFF_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ }
+}
+
+static void rtl8153_runtime_enable(struct r8152 *tp, bool enable)
+{
+ rtl_runtime_suspend_enable(tp, enable);
+
+ if (enable) {
+ r8153_u1u2en(tp, false);
+ r8153_u2p3en(tp, false);
+ } else {
r8153_u2p3en(tp, true);
r8153_u1u2en(tp, true);
}
@@ -2456,6 +2546,17 @@ static void r8153_teredo_off(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
}
+static void rtl_reset_bmu(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_BMU_RESET);
+ ocp_data &= ~(BMU_RESET_EP_IN | BMU_RESET_EP_OUT);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+ ocp_data |= BMU_RESET_EP_IN | BMU_RESET_EP_OUT;
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BMU_RESET, ocp_data);
+}
+
static void r8152_aldps_en(struct r8152 *tp, bool enable)
{
if (enable) {
@@ -2681,6 +2782,7 @@ static void r8153_first_init(struct r8152 *tp)
r8153_hw_phy_cfg(tp);
rtl8152_nic_reset(tp);
+ rtl_reset_bmu(tp);
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
ocp_data &= ~NOW_IS_OOB;
@@ -2742,6 +2844,7 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
rtl_disable(tp);
+ rtl_reset_bmu(tp);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -2803,6 +2906,7 @@ static void rtl8153_disable(struct r8152 *tp)
{
r8153_aldps_en(tp, false);
rtl_disable(tp);
+ rtl_reset_bmu(tp);
r8153_aldps_en(tp, true);
usb_enable_lpm(tp->udev);
}
@@ -3366,7 +3470,7 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
ocp_data = FIFO_EMPTY_1FB | ROK_EXIT_LPM;
- if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER)
+ if (tp->version == RTL_VER_04 && tp->udev->speed < USB_SPEED_SUPER)
ocp_data |= LPM_TIMER_500MS;
else
ocp_data |= LPM_TIMER_500US;
@@ -3382,15 +3486,11 @@ static void r8153_init(struct r8152 *tp)
r8153_power_cut_en(tp, false);
r8153_u1u2en(tp, true);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3,
- PKT_AVAIL_SPDWN_EN | SUSPEND_SPDWN_EN |
- U1U2_SPDWN_EN | L1_SPDWN_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4,
- PWRSAVE_SPDWN_EN | RXDV_SPDWN_EN | TX10MIDLE_EN |
- TP100_SPDWN_EN | TP500_SPDWN_EN | TP1000_SPDWN_EN |
- EEE_SPDWN_EN);
+ /* MAC clock speed down */
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
r8153_enable_eee(tp);
r8153_aldps_en(tp, true);
@@ -3497,7 +3597,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
napi_disable(&tp->napi);
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_stop_rx(tp);
- rtl_runtime_suspend_enable(tp, true);
+ tp->rtl_ops.autosuspend_en(tp, true);
} else {
cancel_delayed_work_sync(&tp->schedule);
tp->rtl_ops.down(tp);
@@ -3523,7 +3623,7 @@ static int rtl8152_resume(struct usb_interface *intf)
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- rtl_runtime_suspend_enable(tp, false);
+ tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
@@ -3542,7 +3642,7 @@ static int rtl8152_resume(struct usb_interface *intf)
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
if (tp->netdev->flags & IFF_UP)
- rtl_runtime_suspend_enable(tp, false);
+ tp->rtl_ops.autosuspend_en(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}
@@ -4122,6 +4222,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8152_get_eee;
ops->eee_set = r8152_set_eee;
ops->in_nway = rtl8152_in_nway;
+ ops->autosuspend_en = rtl_runtime_suspend_enable;
break;
case RTL_VER_03:
@@ -4137,6 +4238,7 @@ static int rtl_ops_init(struct r8152 *tp)
ops->eee_get = r8153_get_eee;
ops->eee_set = r8153_set_eee;
ops->in_nway = rtl8153_in_nway;
+ ops->autosuspend_en = rtl8153_runtime_enable;
break;
default:
@@ -4211,6 +4313,7 @@ static int rtl8152_probe(struct usb_interface *intf,
switch (udev->speed) {
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
tp->coalesce = COALESCE_SUPER;
break;
case USB_SPEED_HIGH:
@@ -4322,3 +4425,4 @@ module_usb_driver(rtl8152_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index d37b7dce2..7c72bfac8 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -451,7 +451,7 @@ static void write_bulk_callback(struct urb *urb)
if (status)
dev_info(&urb->dev->dev, "%s: Tx status %d\n",
dev->netdev->name, status);
- dev->netdev->trans_start = jiffies;
+ netif_trans_update(dev->netdev);
netif_wake_queue(dev->netdev);
}
@@ -694,7 +694,7 @@ static netdev_tx_t rtl8150_start_xmit(struct sk_buff *skb,
} else {
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += skb->len;
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
}
return NETDEV_TX_OK;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index c369db99c..9af979993 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -99,9 +99,11 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
+ return ret;
+ }
le32_to_cpus(&buf);
*data = buf;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2edc2bc6d..dc989a8b5 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -61,6 +61,8 @@
#define SUSPEND_ALLMODES (SUSPEND_SUSPEND0 | SUSPEND_SUSPEND1 | \
SUSPEND_SUSPEND2 | SUSPEND_SUSPEND3)
+#define CARRIER_CHECK_DELAY (2 * HZ)
+
struct smsc95xx_priv {
u32 mac_cr;
u32 hash_hi;
@@ -69,6 +71,9 @@ struct smsc95xx_priv {
spinlock_t mac_cr_lock;
u8 features;
u8 suspend_flags;
+ bool link_ok;
+ struct delayed_work carrier_check;
+ struct usbnet *dev;
};
static bool turbo_mode = true;
@@ -92,9 +97,11 @@ static int __must_check __smsc95xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
+ return ret;
+ }
le32_to_cpus(&buf);
*data = buf;
@@ -622,6 +629,44 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
intdata);
}
+static void set_carrier(struct usbnet *dev, bool link)
+{
+ struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
+ if (pdata->link_ok == link)
+ return;
+
+ pdata->link_ok = link;
+
+ if (link)
+ usbnet_link_change(dev, 1, 0);
+ else
+ usbnet_link_change(dev, 0, 0);
+}
+
+static void check_carrier(struct work_struct *work)
+{
+ struct smsc95xx_priv *pdata = container_of(work, struct smsc95xx_priv,
+ carrier_check.work);
+ struct usbnet *dev = pdata->dev;
+ int ret;
+
+ if (pdata->suspend_flags != 0)
+ return;
+
+ ret = smsc95xx_mdio_read(dev->net, dev->mii.phy_id, MII_BMSR);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read MII_BMSR\n");
+ return;
+ }
+ if (ret & BMSR_LSTATUS)
+ set_carrier(dev, 1);
+ else
+ set_carrier(dev, 0);
+
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+}
+
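check_carrier() re-arms itself every CARRIER_CHECK_DELAY and, via set_carrier(), reports only link-state transitions. A user-space analogue of that poll-and-report-changes loop, with read_link_up() standing in for the MII BMSR_LSTATUS read:

    #include <stdio.h>
    #include <stdbool.h>
    #include <unistd.h>

    static bool read_link_up(int iteration)
    {
        /* stand-in for the PHY status read; comes up on the 4th poll */
        return iteration >= 3;
    }

    int main(void)
    {
        bool link_ok = false;

        for (int i = 0; i < 6; i++) {
            bool link = read_link_up(i);

            if (link != link_ok) {  /* report transitions only */
                link_ok = link;
                printf("link %s\n", link ? "up" : "down");
            }
            sleep(2);  /* CARRIER_CHECK_DELAY analogue (2 * HZ) */
        }
        return 0;
    }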
/* Enable or disable Tx & Rx checksum offload engines */
static int smsc95xx_set_features(struct net_device *netdev,
netdev_features_t features)
@@ -1163,13 +1208,20 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->flags |= IFF_MULTICAST;
dev->net->hard_header_len += SMSC95XX_TX_OVERHEAD_CSUM;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+
+ pdata->dev = dev;
+ INIT_DELAYED_WORK(&pdata->carrier_check, check_carrier);
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
+
return 0;
}
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
+
if (pdata) {
+ cancel_delayed_work(&pdata->carrier_check);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
@@ -1693,6 +1745,7 @@ static int smsc95xx_resume(struct usb_interface *intf)
/* do this first to ensure it's cleared even in error case */
pdata->suspend_flags = 0;
+ schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
if (suspend_flags & SUSPEND_ALLMODES) {
/* clear wake-up sources */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 10798128c..6086a0163 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -356,6 +356,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
break;
case USB_SPEED_SUPER:
+ case USB_SPEED_SUPER_PLUS:
/*
* Don't take the default 5ms qlen for super speed HC, to
* save memory, and iperf tests show 2.5ms qlen can
@@ -394,8 +395,11 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
dev->rx_urb_size = dev->hard_mtu;
- if (dev->rx_urb_size > old_rx_urb_size)
+ if (dev->rx_urb_size > old_rx_urb_size) {
+ usbnet_pause_rx(dev);
usbnet_unlink_rx_urbs(dev);
+ usbnet_resume_rx(dev);
+ }
}
/* max qlen depend on hard_mtu and rx_urb_size */
@@ -1415,7 +1419,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
"tx: submit urb err %d\n", retval);
break;
case 0:
- net->trans_start = jiffies;
+ netif_trans_update(net);
__usbnet_queue_skb(&dev->txq, skb, tx_start);
if (dev->txq.qlen >= TX_QLEN (dev))
netif_stop_queue (net);
@@ -1507,8 +1511,9 @@ static void usbnet_bh (unsigned long param)
} else if (netif_running (dev->net) &&
netif_device_present (dev->net) &&
netif_carrier_ok(dev->net) &&
- !timer_pending (&dev->delay) &&
- !test_bit (EVENT_RX_HALT, &dev->flags)) {
+ !timer_pending(&dev->delay) &&
+ !test_bit(EVENT_RX_PAUSED, &dev->flags) &&
+ !test_bit(EVENT_RX_HALT, &dev->flags)) {
int temp = dev->rxq.qlen;
if (temp < RX_QLEN(dev)) {
@@ -1844,7 +1849,7 @@ int usbnet_resume (struct usb_interface *intf)
usb_free_urb(res);
usb_autopm_put_interface_async(dev->intf);
} else {
- dev->net->trans_start = jiffies;
+ netif_trans_update(dev->net);
__skb_queue_tail(&dev->txq, skb);
}
}
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 4f30a6ae5..f37a6e61d 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -312,10 +312,9 @@ static const struct net_device_ops veth_netdev_ops = {
.ndo_set_rx_headroom = veth_set_rx_headroom,
};
-#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
- NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
- NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL | \
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT | NETIF_F_UFO | \
+#define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HW_CSUM | \
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA | \
+ NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_STAG_TX | NETIF_F_HW_VLAN_STAG_RX )
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 49d84e540..e0638e556 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1925,24 +1925,11 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- /* Last of all, set up some receive buffers. */
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
-
- /* If we didn't even get one input buffer, we're useless. */
- if (vi->rq[i].vq->num_free ==
- virtqueue_get_vring_size(vi->rq[i].vq)) {
- free_unused_bufs(vi);
- err = -ENOMEM;
- goto free_recv_bufs;
- }
- }
-
vi->nb.notifier_call = &virtnet_cpu_callback;
err = register_hotcpu_notifier(&vi->nb);
if (err) {
pr_debug("virtio_net: registering cpu notifier failed\n");
- goto free_recv_bufs;
+ goto free_unregister_netdev;
}
/* Assume link up if device can't report link status,
@@ -1960,10 +1947,9 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
-free_recv_bufs:
+free_unregister_netdev:
vi->vdev->config->reset(vdev);
- free_receive_bufs(vi);
unregister_netdev(dev);
free_vqs:
cancel_delayed_work_sync(&vi->refill);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index db8022ae4..08885bc8d 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd;
segCnt = rcdlro->segCnt;
- BUG_ON(segCnt <= 1);
+ WARN_ON_ONCE(segCnt == 0);
mss = rcdlro->mss;
if (unlikely(segCnt <= 1))
segCnt = 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index c4825392d..3d2b64e63 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040700
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040800
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 8a8f1e58b..8bd8c7e1e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -42,12 +42,9 @@
#define DRV_NAME "vrf"
#define DRV_VERSION "1.0"
-#define vrf_master_get_rcu(dev) \
- ((struct net_device *)rcu_dereference(dev->rx_handler_data))
-
struct net_vrf {
- struct rtable *rth;
- struct rt6_info *rt6;
+ struct rtable __rcu *rth;
+ struct rt6_info __rcu *rt6;
u32 tb_id;
};
@@ -60,90 +57,12 @@ struct pcpu_dstats {
struct u64_stats_sync syncp;
};
-/* neighbor handling is done with actual device; do not want
- * to flip skb->dev for those ndisc packets. This really fails
- * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
- * a start.
- */
-#if IS_ENABLED(CONFIG_IPV6)
-static bool check_ipv6_frame(const struct sk_buff *skb)
-{
- const struct ipv6hdr *ipv6h;
- struct ipv6hdr _ipv6h;
- bool rc = true;
-
- ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
- if (!ipv6h)
- goto out;
-
- if (ipv6h->nexthdr == NEXTHDR_ICMP) {
- const struct icmp6hdr *icmph;
- struct icmp6hdr _icmph;
-
- icmph = skb_header_pointer(skb, sizeof(_ipv6h),
- sizeof(_icmph), &_icmph);
- if (!icmph)
- goto out;
-
- switch (icmph->icmp6_type) {
- case NDISC_ROUTER_SOLICITATION:
- case NDISC_ROUTER_ADVERTISEMENT:
- case NDISC_NEIGHBOUR_SOLICITATION:
- case NDISC_NEIGHBOUR_ADVERTISEMENT:
- case NDISC_REDIRECT:
- rc = false;
- break;
- }
- }
-
-out:
- return rc;
-}
-#else
-static bool check_ipv6_frame(const struct sk_buff *skb)
-{
- return false;
-}
-#endif
-
-static bool is_ip_rx_frame(struct sk_buff *skb)
-{
- switch (skb->protocol) {
- case htons(ETH_P_IP):
- return true;
- case htons(ETH_P_IPV6):
- return check_ipv6_frame(skb);
- }
- return false;
-}
-
static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
vrf_dev->stats.tx_errors++;
kfree_skb(skb);
}
-/* note: already called with rcu_read_lock */
-static rx_handler_result_t vrf_handle_frame(struct sk_buff **pskb)
-{
- struct sk_buff *skb = *pskb;
-
- if (is_ip_rx_frame(skb)) {
- struct net_device *dev = vrf_master_get_rcu(skb->dev);
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
-
- u64_stats_update_begin(&dstats->syncp);
- dstats->rx_pkts++;
- dstats->rx_bytes += skb->len;
- u64_stats_update_end(&dstats->syncp);
-
- skb->dev = dev;
-
- return RX_HANDLER_ANOTHER;
- }
- return RX_HANDLER_PASS;
-}
-
static struct rtnl_link_stats64 *vrf_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
@@ -354,28 +273,40 @@ static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
+/* holding rtnl */
static void vrf_rt6_release(struct net_vrf *vrf)
{
- dst_release(&vrf->rt6->dst);
- vrf->rt6 = NULL;
+ struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
+
+ rcu_assign_pointer(vrf->rt6, NULL);
+
+ if (rt6)
+ dst_release(&rt6->dst);
}
static int vrf_rt6_create(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
struct net *net = dev_net(dev);
+ struct fib6_table *rt6i_table;
struct rt6_info *rt6;
int rc = -ENOMEM;
+ rt6i_table = fib6_new_table(net, vrf->tb_id);
+ if (!rt6i_table)
+ goto out;
+
rt6 = ip6_dst_alloc(net, dev,
DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE);
if (!rt6)
goto out;
- rt6->dst.output = vrf_output6;
- rt6->rt6i_table = fib6_get_table(net, vrf->tb_id);
dst_hold(&rt6->dst);
- vrf->rt6 = rt6;
+
+ rt6->rt6i_table = rt6i_table;
+ rt6->dst.output = vrf_output6;
+ rcu_assign_pointer(vrf->rt6, rt6);
+
rc = 0;
out:
return rc;
@@ -449,26 +380,35 @@ static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
+/* holding rtnl */
static void vrf_rtable_release(struct net_vrf *vrf)
{
- struct dst_entry *dst = (struct dst_entry *)vrf->rth;
+ struct rtable *rth = rtnl_dereference(vrf->rth);
+
+ rcu_assign_pointer(vrf->rth, NULL);
- dst_release(dst);
- vrf->rth = NULL;
+ if (rth)
+ dst_release(&rth->dst);
}
-static struct rtable *vrf_rtable_create(struct net_device *dev)
+static int vrf_rtable_create(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
struct rtable *rth;
+ if (!fib_new_table(dev_net(dev), vrf->tb_id))
+ return -ENOMEM;
+
rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
- if (rth) {
- rth->dst.output = vrf_output;
- rth->rt_table_id = vrf->tb_id;
- }
+ if (!rth)
+ return -ENOMEM;
- return rth;
+ rth->dst.output = vrf_output;
+ rth->rt_table_id = vrf->tb_id;
+
+ rcu_assign_pointer(vrf->rth, rth);
+
+ return 0;
}
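The vrf conversion above swaps plain dst pointers for __rcu ones: writers publish with rcu_assign_pointer() while holding rtnl, and readers dereference under rcu_read_lock() and take a dst reference before dropping the lock. A C11-atomics sketch of the same publish/acquire shape; this illustrates the ordering only and is not the kernel RCU API:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rtable_sketch {
        int refcnt;
        int table_id;
    };

    static _Atomic(struct rtable_sketch *) rth_ptr;

    static void publish(struct rtable_sketch *rth)
    {
        /* counterpart of rcu_assign_pointer(vrf->rth, rth) */
        atomic_store_explicit(&rth_ptr, rth, memory_order_release);
    }

    static struct rtable_sketch *lookup(void)
    {
        /* counterpart of rcu_dereference(vrf->rth) in a read section */
        struct rtable_sketch *rth =
            atomic_load_explicit(&rth_ptr, memory_order_acquire);

        if (rth)
            rth->refcnt++;  /* dst_hold() analogue; not itself atomic here */
        return rth;
    }

    int main(void)
    {
        struct rtable_sketch *rth = calloc(1, sizeof(*rth));

        if (!rth)
            return 1;
        rth->table_id = 10;
        publish(rth);

        struct rtable_sketch *seen = lookup();

        if (seen)
            printf("table %d, refcnt %d\n", seen->table_id, seen->refcnt);
        free(rth);
        return 0;
    }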
/**************************** device handling ********************/
@@ -497,28 +437,14 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
int ret;
- /* register the packet handler for slave ports */
- ret = netdev_rx_handler_register(port_dev, vrf_handle_frame, dev);
- if (ret) {
- netdev_err(port_dev,
- "Device %s failed to register rx_handler\n",
- port_dev->name);
- goto out_fail;
- }
-
ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
if (ret < 0)
- goto out_unregister;
+ return ret;
port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
cycle_netdev(port_dev);
return 0;
-
-out_unregister:
- netdev_rx_handler_unregister(port_dev);
-out_fail:
- return ret;
}
static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
@@ -535,8 +461,6 @@ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
netdev_upper_dev_unlink(port_dev, dev);
port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
- netdev_rx_handler_unregister(port_dev);
-
cycle_netdev(port_dev);
return 0;
@@ -572,8 +496,7 @@ static int vrf_dev_init(struct net_device *dev)
goto out_nomem;
/* create the default dst which points back to us */
- vrf->rth = vrf_rtable_create(dev);
- if (!vrf->rth)
+ if (vrf_rtable_create(dev) != 0)
goto out_stats;
if (vrf_rt6_create(dev) != 0)
@@ -616,8 +539,13 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
if (!(fl4->flowi4_flags & FLOWI_FLAG_L3MDEV_SRC)) {
struct net_vrf *vrf = netdev_priv(dev);
- rth = vrf->rth;
- dst_hold(&rth->dst);
+ rcu_read_lock();
+
+ rth = rcu_dereference(vrf->rth);
+ if (likely(rth))
+ dst_hold(&rth->dst);
+
+ rcu_read_unlock();
}
return rth;
@@ -639,6 +567,8 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
+ /* make sure oif is set to VRF device for lookup */
+ fl4->flowi4_oif = dev->ifindex;
fl4->flowi4_tos = tos & IPTOS_RT_MASK;
fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
@@ -659,19 +589,116 @@ static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
}
#if IS_ENABLED(CONFIG_IPV6)
+/* neighbor handling is done with actual device; do not want
+ * to flip skb->dev for those ndisc packets. This really fails
+ * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
+ * a start.
+ */
+static bool ipv6_ndisc_frame(const struct sk_buff *skb)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ bool rc = false;
+
+ if (iph->nexthdr == NEXTHDR_ICMP) {
+ const struct icmp6hdr *icmph;
+ struct icmp6hdr _icmph;
+
+ icmph = skb_header_pointer(skb, sizeof(*iph),
+ sizeof(_icmph), &_icmph);
+ if (!icmph)
+ goto out;
+
+ switch (icmph->icmp6_type) {
+ case NDISC_ROUTER_SOLICITATION:
+ case NDISC_ROUTER_ADVERTISEMENT:
+ case NDISC_NEIGHBOUR_SOLICITATION:
+ case NDISC_NEIGHBOUR_ADVERTISEMENT:
+ case NDISC_REDIRECT:
+ rc = true;
+ break;
+ }
+ }
+
+out:
+ return rc;
+}
+
+static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ struct sk_buff *skb)
+{
+ /* if packet is NDISC keep the ingress interface */
+ if (!ipv6_ndisc_frame(skb)) {
+ skb->dev = vrf_dev;
+ skb->skb_iif = vrf_dev->ifindex;
+
+ skb_push(skb, skb->mac_len);
+ dev_queue_xmit_nit(skb, vrf_dev);
+ skb_pull(skb, skb->mac_len);
+
+ IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
+ }
+
+ return skb;
+}
+
+#else
+static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
+ struct sk_buff *skb)
+{
+ return skb;
+}
+#endif
+
+static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
+ struct sk_buff *skb)
+{
+ skb->dev = vrf_dev;
+ skb->skb_iif = vrf_dev->ifindex;
+
+ skb_push(skb, skb->mac_len);
+ dev_queue_xmit_nit(skb, vrf_dev);
+ skb_pull(skb, skb->mac_len);
+
+ return skb;
+}
+
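Both rcv helpers bracket dev_queue_xmit_nit() with skb_push()/skb_pull(): the MAC header is exposed so packet taps on the VRF device capture the full L2 frame, then hidden again so L3 processing still starts at the network header. A toy model of that offset dance (struct toy_skb is invented for the example):

    #include <stdio.h>

    struct toy_skb {
        const char *buf;
        unsigned int data_off;  /* like skb->data: current start of data */
        unsigned int mac_len;
    };

    static void toy_push(struct toy_skb *skb, unsigned int len)
    {
        skb->data_off -= len;  /* expose len bytes of header */
    }

    static void toy_pull(struct toy_skb *skb, unsigned int len)
    {
        skb->data_off += len;  /* hide them again */
    }

    int main(void)
    {
        struct toy_skb skb = { "MACHDR|payload", 7, 7 };

        toy_push(&skb, skb.mac_len);
        printf("tap sees: %s\n", skb.buf + skb.data_off);  /* full frame */
        toy_pull(&skb, skb.mac_len);
        printf("l3 sees:  %s\n", skb.buf + skb.data_off);  /* payload only */
        return 0;
    }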
+/* called with rcu lock held */
+static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
+ struct sk_buff *skb,
+ u16 proto)
+{
+ switch (proto) {
+ case AF_INET:
+ return vrf_ip_rcv(vrf_dev, skb);
+ case AF_INET6:
+ return vrf_ip6_rcv(vrf_dev, skb);
+ }
+
+ return skb;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *vrf_get_rt6_dst(const struct net_device *dev,
const struct flowi6 *fl6)
{
- struct rt6_info *rt = NULL;
+ struct dst_entry *dst = NULL;
if (!(fl6->flowi6_flags & FLOWI_FLAG_L3MDEV_SRC)) {
struct net_vrf *vrf = netdev_priv(dev);
+ struct rt6_info *rt;
+
+ rcu_read_lock();
+
+ rt = rcu_dereference(vrf->rt6);
+ if (likely(rt)) {
+ dst = &rt->dst;
+ dst_hold(dst);
+ }
- rt = vrf->rt6;
- dst_hold(&rt->dst);
+ rcu_read_unlock();
}
- return (struct dst_entry *)rt;
+ return dst;
}
#endif
@@ -679,6 +706,7 @@ static const struct l3mdev_ops vrf_l3mdev_ops = {
.l3mdev_fib_table = vrf_fib_table,
.l3mdev_get_rtable = vrf_get_rtable,
.l3mdev_get_saddr = vrf_get_saddr,
+ .l3mdev_l3_rcv = vrf_l3_rcv,
#if IS_ENABLED(CONFIG_IPV6)
.l3mdev_get_rt6_dst = vrf_get_rt6_dst,
#endif
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7e29b5501..b3b9db68f 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -98,7 +98,6 @@ struct vxlan_fdb {
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
-static struct workqueue_struct *vxlan_wq;
static inline bool vxlan_collect_metadata(struct vxlan_sock *vs)
{
@@ -551,16 +550,15 @@ static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb,
return vh;
}
-static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
- struct sk_buff *skb,
- struct udp_offload *uoff)
+static struct sk_buff **vxlan_gro_receive(struct sock *sk,
+ struct sk_buff **head,
+ struct sk_buff *skb)
{
struct sk_buff *p, **pp = NULL;
struct vxlanhdr *vh, *vh2;
unsigned int hlen, off_vx;
int flush = 1;
- struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock,
- udp_offloads);
+ struct vxlan_sock *vs = rcu_dereference_sk_user_data(sk);
__be32 flags;
struct gro_remcsum grc;
@@ -613,8 +611,7 @@ out:
return pp;
}
-static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
- struct udp_offload *uoff)
+static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
{
/* Sets 'skb->inner_mac_header' since we are always called with
* 'skb->encapsulation' set.
@@ -630,13 +627,6 @@ static void vxlan_notify_add_rx_port(struct vxlan_sock *vs)
struct net *net = sock_net(sk);
sa_family_t sa_family = vxlan_get_sk_family(vs);
__be16 port = inet_sk(sk)->inet_sport;
- int err;
-
- if (sa_family == AF_INET) {
- err = udp_add_offload(net, &vs->udp_offloads);
- if (err)
- pr_warn("vxlan: udp_add_offload failed with status %d\n", err);
- }
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
@@ -663,9 +653,6 @@ static void vxlan_notify_del_rx_port(struct vxlan_sock *vs)
port);
}
rcu_read_unlock();
-
- if (sa_family == AF_INET)
- udp_del_offload(&vs->udp_offloads);
}
/* Add new entry to forwarding table -- assumes lock held */
@@ -1051,14 +1038,14 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev)
return false;
}
-static void __vxlan_sock_release(struct vxlan_sock *vs)
+static bool __vxlan_sock_release_prep(struct vxlan_sock *vs)
{
struct vxlan_net *vn;
if (!vs)
- return;
+ return false;
if (!atomic_dec_and_test(&vs->refcnt))
- return;
+ return false;
vn = net_generic(sock_net(vs->sock->sk), vxlan_net_id);
spin_lock(&vn->sock_lock);
@@ -1066,14 +1053,28 @@ static void __vxlan_sock_release(struct vxlan_sock *vs)
vxlan_notify_del_rx_port(vs);
spin_unlock(&vn->sock_lock);
- queue_work(vxlan_wq, &vs->del_work);
+ return true;
}
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
- __vxlan_sock_release(vxlan->vn4_sock);
+ bool ipv4 = __vxlan_sock_release_prep(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
- __vxlan_sock_release(vxlan->vn6_sock);
+ bool ipv6 = __vxlan_sock_release_prep(vxlan->vn6_sock);
+#endif
+
+ synchronize_net();
+
+ if (ipv4) {
+ udp_tunnel_sock_release(vxlan->vn4_sock->sock);
+ kfree(vxlan->vn4_sock);
+ }
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6) {
+ udp_tunnel_sock_release(vxlan->vn6_sock->sock);
+ kfree(vxlan->vn6_sock);
+ }
#endif
}
@@ -1193,6 +1194,45 @@ out:
unparsed->vx_flags &= ~VXLAN_GBP_USED_BITS;
}
+static bool vxlan_parse_gpe_hdr(struct vxlanhdr *unparsed,
+ __be16 *protocol,
+ struct sk_buff *skb, u32 vxflags)
+{
+ struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)unparsed;
+
+ /* Need to have Next Protocol set for interfaces in GPE mode. */
+ if (!gpe->np_applied)
+ return false;
+ /* "The initial version is 0. If a receiver does not support the
+ * version indicated it MUST drop the packet."
+ */
+ if (gpe->version != 0)
+ return false;
+ /* "When the O bit is set to 1, the packet is an OAM packet and OAM
+ * processing MUST occur." However, we don't implement OAM
+ * processing, thus drop the packet.
+ */
+ if (gpe->oam_flag)
+ return false;
+
+ switch (gpe->next_protocol) {
+ case VXLAN_GPE_NP_IPV4:
+ *protocol = htons(ETH_P_IP);
+ break;
+ case VXLAN_GPE_NP_IPV6:
+ *protocol = htons(ETH_P_IPV6);
+ break;
+ case VXLAN_GPE_NP_ETHERNET:
+ *protocol = htons(ETH_P_TEB);
+ break;
+ default:
+ return false;
+ }
+
+ unparsed->vx_flags &= ~VXLAN_GPE_USED_BITS;
+ return true;
+}
+
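vxlan_parse_gpe_hdr() above and vxlan_build_gpe_hdr() later in this patch translate between the GPE Next Protocol field and ethertypes. A standalone sketch of the mapping; the NP_* numbering follows the VXLAN-GPE draft values used by the kernel's VXLAN_GPE_NP_* constants, but treat the literals here as illustrative:

    #include <stdint.h>
    #include <stdio.h>

    enum { NP_IPV4 = 0x1, NP_IPV6 = 0x2, NP_ETHERNET = 0x3 };

    static int np_to_ethertype(uint8_t np, uint16_t *ethertype)
    {
        switch (np) {
        case NP_IPV4:
            *ethertype = 0x0800;  /* ETH_P_IP */
            return 0;
        case NP_IPV6:
            *ethertype = 0x86DD;  /* ETH_P_IPV6 */
            return 0;
        case NP_ETHERNET:
            *ethertype = 0x6558;  /* ETH_P_TEB */
            return 0;
        default:
            return -1;  /* unknown: drop, as the parser above does */
        }
    }

    int main(void)
    {
        uint16_t proto;

        if (np_to_ethertype(NP_IPV6, &proto) == 0)
            printf("0x%04X\n", proto);  /* 0x86DD */
        return 0;
    }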
static bool vxlan_set_mac(struct vxlan_dev *vxlan,
struct vxlan_sock *vs,
struct sk_buff *skb)
@@ -1258,9 +1298,11 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
struct vxlanhdr unparsed;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
+ __be16 protocol = htons(ETH_P_TEB);
+ bool raw_proto = false;
void *oiph;
- /* Need Vxlan and inner Ethernet header to be present */
+ /* Need UDP and VXLAN header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
goto drop;
@@ -1284,9 +1326,18 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
if (!vxlan)
goto drop;
- if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB),
- !net_eq(vxlan->net, dev_net(vxlan->dev))))
- goto drop;
+ /* For backwards compatibility, only allow reserved fields to be
+ * used by VXLAN extensions if explicitly requested.
+ */
+ if (vs->flags & VXLAN_F_GPE) {
+ if (!vxlan_parse_gpe_hdr(&unparsed, &protocol, skb, vs->flags))
+ goto drop;
+ raw_proto = true;
+ }
+
+ if (__iptunnel_pull_header(skb, VXLAN_HLEN, protocol, raw_proto,
+ !net_eq(vxlan->net, dev_net(vxlan->dev))))
+ goto drop;
if (vxlan_collect_metadata(vs)) {
__be32 vni = vxlan_vni(vxlan_hdr(skb)->vx_vni);
@@ -1305,14 +1356,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
memset(md, 0, sizeof(*md));
}
- /* For backwards compatibility, only allow reserved fields to be
- * used by VXLAN extensions if explicitly requested.
- */
if (vs->flags & VXLAN_F_REMCSUM_RX)
if (!vxlan_remcsum(&unparsed, skb, vs->flags))
goto drop;
if (vs->flags & VXLAN_F_GBP)
vxlan_parse_gbp_hdr(&unparsed, skb, vs->flags, md);
+ /* Note that GBP and GPE can never be active together. This is
+ * ensured in vxlan_dev_configure.
+ */
if (unparsed.vx_flags || unparsed.vx_vni) {
/* If there are any unprocessed flags remaining treat
@@ -1326,8 +1377,14 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
}
- if (!vxlan_set_mac(vxlan, vs, skb))
- goto drop;
+ if (!raw_proto) {
+ if (!vxlan_set_mac(vxlan, vs, skb))
+ goto drop;
+ } else {
+ skb_reset_mac_header(skb);
+ skb->dev = vxlan->dev;
+ skb->pkt_type = PACKET_HOST;
+ }
oiph = skb_network_header(skb);
skb_reset_network_header(skb);
@@ -1686,6 +1743,27 @@ static void vxlan_build_gbp_hdr(struct vxlanhdr *vxh, u32 vxflags,
gbp->policy_id = htons(md->gbp & VXLAN_GBP_ID_MASK);
}
+static int vxlan_build_gpe_hdr(struct vxlanhdr *vxh, u32 vxflags,
+ __be16 protocol)
+{
+ struct vxlanhdr_gpe *gpe = (struct vxlanhdr_gpe *)vxh;
+
+ gpe->np_applied = 1;
+
+ switch (protocol) {
+ case htons(ETH_P_IP):
+ gpe->next_protocol = VXLAN_GPE_NP_IPV4;
+ return 0;
+ case htons(ETH_P_IPV6):
+ gpe->next_protocol = VXLAN_GPE_NP_IPV6;
+ return 0;
+ case htons(ETH_P_TEB):
+ gpe->next_protocol = VXLAN_GPE_NP_ETHERNET;
+ return 0;
+ }
+ return -EPFNOSUPPORT;
+}
+
static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
int iphdr_len, __be32 vni,
struct vxlan_metadata *md, u32 vxflags,
@@ -1695,6 +1773,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
int min_headroom;
int err;
int type = udp_sum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
+ __be16 inner_protocol = htons(ETH_P_TEB);
if ((vxflags & VXLAN_F_REMCSUM_TX) &&
skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1713,18 +1792,16 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err)) {
- kfree_skb(skb);
- return err;
- }
+ if (unlikely(err))
+ goto out_free;
skb = vlan_hwaccel_push_inside(skb);
if (WARN_ON(!skb))
return -ENOMEM;
- skb = iptunnel_handle_offloads(skb, type);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ err = iptunnel_handle_offloads(skb, type);
+ if (err)
+ goto out_free;
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = VXLAN_HF_VNI;
@@ -1745,9 +1822,19 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
if (vxflags & VXLAN_F_GBP)
vxlan_build_gbp_hdr(vxh, vxflags, md);
+ if (vxflags & VXLAN_F_GPE) {
+ err = vxlan_build_gpe_hdr(vxh, vxflags, skb->protocol);
+ if (err < 0)
+ goto out_free;
+ inner_protocol = skb->protocol;
+ }
- skb_set_inner_protocol(skb, htons(ETH_P_TEB));
+ skb_set_inner_protocol(skb, inner_protocol);
return 0;
+
+out_free:
+ kfree_skb(skb);
+ return err;
}
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan,
@@ -2107,9 +2194,17 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
info = skb_tunnel_info(skb);
skb_reset_mac_header(skb);
- eth = eth_hdr(skb);
- if ((vxlan->flags & VXLAN_F_PROXY)) {
+ if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+ if (info && info->mode & IP_TUNNEL_INFO_TX)
+ vxlan_xmit_one(skb, dev, NULL, false);
+ else
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (vxlan->flags & VXLAN_F_PROXY) {
+ eth = eth_hdr(skb);
if (ntohs(eth->h_proto) == ETH_P_ARP)
return arp_reduce(dev, skb);
#if IS_ENABLED(CONFIG_IPV6)
@@ -2124,18 +2219,10 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
return neigh_reduce(dev, skb);
}
- eth = eth_hdr(skb);
#endif
}
- if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
- if (info && info->mode & IP_TUNNEL_INFO_TX)
- vxlan_xmit_one(skb, dev, NULL, false);
- else
- kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
+ eth = eth_hdr(skb);
f = vxlan_find_mac(vxlan, eth->h_dest);
did_rsc = false;
@@ -2405,7 +2492,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
return 0;
}
-static const struct net_device_ops vxlan_netdev_ops = {
+static const struct net_device_ops vxlan_netdev_ether_ops = {
.ndo_init = vxlan_init,
.ndo_uninit = vxlan_uninit,
.ndo_open = vxlan_open,
@@ -2422,6 +2509,17 @@ static const struct net_device_ops vxlan_netdev_ops = {
.ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
};
+static const struct net_device_ops vxlan_netdev_raw_ops = {
+ .ndo_init = vxlan_init,
+ .ndo_uninit = vxlan_uninit,
+ .ndo_open = vxlan_open,
+ .ndo_stop = vxlan_stop,
+ .ndo_start_xmit = vxlan_xmit,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
+ .ndo_change_mtu = vxlan_change_mtu,
+ .ndo_fill_metadata_dst = vxlan_fill_metadata_dst,
+};
+
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
.name = "vxlan",
@@ -2431,7 +2529,7 @@ static struct device_type vxlan_type = {
* supply the listening VXLAN udp ports. Callers are expected
* to implement the ndo_add_vxlan_port.
*/
-void vxlan_get_rx_port(struct net_device *dev)
+static void vxlan_push_rx_ports(struct net_device *dev)
{
struct vxlan_sock *vs;
struct net *net = dev_net(dev);
@@ -2440,6 +2538,9 @@ void vxlan_get_rx_port(struct net_device *dev)
__be16 port;
unsigned int i;
+ if (!dev->netdev_ops->ndo_add_vxlan_port)
+ return;
+
spin_lock(&vn->sock_lock);
for (i = 0; i < PORT_HASH_SIZE; ++i) {
hlist_for_each_entry_rcu(vs, &vn->sock_list[i], hlist) {
@@ -2451,7 +2552,6 @@ void vxlan_get_rx_port(struct net_device *dev)
}
spin_unlock(&vn->sock_lock);
}
-EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
@@ -2462,7 +2562,6 @@ static void vxlan_setup(struct net_device *dev)
eth_hw_addr_random(dev);
ether_setup(dev);
- dev->netdev_ops = &vxlan_netdev_ops;
dev->destructor = free_netdev;
SET_NETDEV_DEVTYPE(dev, &vxlan_type);
@@ -2477,8 +2576,7 @@ static void vxlan_setup(struct net_device *dev)
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
netif_keep_dst(dev);
- dev->priv_flags &= ~IFF_TX_SKB_SHARING;
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_QUEUE;
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
@@ -2497,6 +2595,23 @@ static void vxlan_setup(struct net_device *dev)
INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}
+static void vxlan_ether_setup(struct net_device *dev)
+{
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->netdev_ops = &vxlan_netdev_ether_ops;
+}
+
+static void vxlan_raw_setup(struct net_device *dev)
+{
+ dev->header_ops = NULL;
+ dev->type = ARPHRD_NONE;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->netdev_ops = &vxlan_netdev_raw_ops;
+}
+
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_ID] = { .type = NLA_U32 },
[IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
@@ -2523,6 +2638,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 },
[IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 },
[IFLA_VXLAN_GBP] = { .type = NLA_FLAG, },
+ [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, },
[IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG },
};
@@ -2575,13 +2691,6 @@ static const struct ethtool_ops vxlan_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-static void vxlan_del_work(struct work_struct *work)
-{
- struct vxlan_sock *vs = container_of(work, struct vxlan_sock, del_work);
- udp_tunnel_sock_release(vs->sock);
- kfree_rcu(vs, rcu);
-}
-
static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
__be16 port, u32 flags)
{
@@ -2627,8 +2736,6 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
for (h = 0; h < VNI_HASH_SIZE; ++h)
INIT_HLIST_HEAD(&vs->vni_list[h]);
- INIT_WORK(&vs->del_work, vxlan_del_work);
-
sock = vxlan_create_sock(net, ipv6, port, flags);
if (IS_ERR(sock)) {
pr_info("Cannot bind port %d, err=%ld\n", ntohs(port),
@@ -2641,21 +2748,19 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6,
atomic_set(&vs->refcnt, 1);
vs->flags = (flags & VXLAN_F_RCV_FLAGS);
- /* Initialize the vxlan udp offloads structure */
- vs->udp_offloads.port = port;
- vs->udp_offloads.callbacks.gro_receive = vxlan_gro_receive;
- vs->udp_offloads.callbacks.gro_complete = vxlan_gro_complete;
-
spin_lock(&vn->sock_lock);
hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
vxlan_notify_add_rx_port(vs);
spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
+ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
tunnel_cfg.sk_user_data = vs;
tunnel_cfg.encap_type = 1;
tunnel_cfg.encap_rcv = vxlan_rcv;
tunnel_cfg.encap_destroy = NULL;
+ tunnel_cfg.gro_receive = vxlan_gro_receive;
+ tunnel_cfg.gro_complete = vxlan_gro_complete;
setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
@@ -2723,6 +2828,21 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
__be16 default_port = vxlan->cfg.dst_port;
struct net_device *lowerdev = NULL;
+ if (conf->flags & VXLAN_F_GPE) {
+ if (conf->flags & ~VXLAN_F_ALLOWED_GPE)
+ return -EINVAL;
+ /* For now, allow GPE only together with COLLECT_METADATA.
+ * This can be relaxed later; in such case, the other side
+ * of the PtP link will have to be provided.
+ */
+ if (!(conf->flags & VXLAN_F_COLLECT_METADATA))
+ return -EINVAL;
+
+ vxlan_raw_setup(dev);
+ } else {
+ vxlan_ether_setup(dev);
+ }
+
vxlan->net = src_net;
dst->remote_vni = conf->vni;
@@ -2784,8 +2904,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
dev->needed_headroom = needed_headroom;
memcpy(&vxlan->cfg, conf, sizeof(*conf));
- if (!vxlan->cfg.dst_port)
- vxlan->cfg.dst_port = default_port;
+ if (!vxlan->cfg.dst_port) {
+ if (conf->flags & VXLAN_F_GPE)
+ vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
+ else
+ vxlan->cfg.dst_port = default_port;
+ }
vxlan->flags |= conf->flags;
if (!vxlan->cfg.age_interval)
@@ -2828,30 +2952,6 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return 0;
}
-struct net_device *vxlan_dev_create(struct net *net, const char *name,
- u8 name_assign_type, struct vxlan_config *conf)
-{
- struct nlattr *tb[IFLA_MAX+1];
- struct net_device *dev;
- int err;
-
- memset(&tb, 0, sizeof(tb));
-
- dev = rtnl_create_link(net, name, name_assign_type,
- &vxlan_link_ops, tb);
- if (IS_ERR(dev))
- return dev;
-
- err = vxlan_dev_configure(net, dev, conf);
- if (err < 0) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
-
- return dev;
-}
-EXPORT_SYMBOL_GPL(vxlan_dev_create);
-
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
@@ -2956,6 +3056,9 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (data[IFLA_VXLAN_GBP])
conf.flags |= VXLAN_F_GBP;
+ if (data[IFLA_VXLAN_GPE])
+ conf.flags |= VXLAN_F_GPE;
+
if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL])
conf.flags |= VXLAN_F_REMCSUM_NOPARTIAL;
@@ -2975,6 +3078,10 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
case -EEXIST:
pr_info("duplicate VNI %u\n", be32_to_cpu(conf.vni));
break;
+
+ case -EINVAL:
+ pr_info("unsupported combination of extensions\n");
+ break;
}
return err;
@@ -3102,6 +3209,10 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_flag(skb, IFLA_VXLAN_GBP))
goto nla_put_failure;
+ if (vxlan->flags & VXLAN_F_GPE &&
+ nla_put_flag(skb, IFLA_VXLAN_GPE))
+ goto nla_put_failure;
+
if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL &&
nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL))
goto nla_put_failure;
@@ -3133,6 +3244,40 @@ static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
.get_link_net = vxlan_get_link_net,
};
+struct net_device *vxlan_dev_create(struct net *net, const char *name,
+ u8 name_assign_type,
+ struct vxlan_config *conf)
+{
+ struct nlattr *tb[IFLA_MAX + 1];
+ struct net_device *dev;
+ int err;
+
+ memset(&tb, 0, sizeof(tb));
+
+ dev = rtnl_create_link(net, name, name_assign_type,
+ &vxlan_link_ops, tb);
+ if (IS_ERR(dev))
+ return dev;
+
+ err = vxlan_dev_configure(net, dev, conf);
+ if (err < 0) {
+ free_netdev(dev);
+ return ERR_PTR(err);
+ }
+
+ err = rtnl_configure_link(dev, NULL);
+ if (err < 0) {
+ LIST_HEAD(list_kill);
+
+ vxlan_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return ERR_PTR(err);
+ }
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(vxlan_dev_create);
+
static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
struct net_device *dev)
{
@@ -3155,20 +3300,22 @@ static void vxlan_handle_lowerdev_unregister(struct vxlan_net *vn,
unregister_netdevice_many(&list_kill);
}
-static int vxlan_lowerdev_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int vxlan_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
if (event == NETDEV_UNREGISTER)
vxlan_handle_lowerdev_unregister(vn, dev);
+ else if (event == NETDEV_OFFLOAD_PUSH_VXLAN)
+ vxlan_push_rx_ports(dev);
return NOTIFY_DONE;
}
static struct notifier_block vxlan_notifier_block __read_mostly = {
- .notifier_call = vxlan_lowerdev_event,
+ .notifier_call = vxlan_netdevice_event,
};
static __net_init int vxlan_init_net(struct net *net)
@@ -3222,10 +3369,6 @@ static int __init vxlan_init_module(void)
{
int rc;
- vxlan_wq = alloc_workqueue("vxlan", 0, 0);
- if (!vxlan_wq)
- return -ENOMEM;
-
get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
rc = register_pernet_subsys(&vxlan_net_ops);
@@ -3246,7 +3389,6 @@ out3:
out2:
unregister_pernet_subsys(&vxlan_net_ops);
out1:
- destroy_workqueue(vxlan_wq);
return rc;
}
late_initcall(vxlan_init_module);
@@ -3255,7 +3397,6 @@ static void __exit vxlan_cleanup_module(void)
{
rtnl_link_unregister(&vxlan_link_ops);
unregister_netdevice_notifier(&vxlan_notifier_block);
- destroy_workqueue(vxlan_wq);
unregister_pernet_subsys(&vxlan_net_ops);
/* rcu_barrier() is called by netns */
}
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 848ea6a39..b87fe0a01 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -739,7 +739,7 @@ static char *cosa_net_setup_rx(struct channel_data *chan, int size)
chan->netdev->stats.rx_dropped++;
return NULL;
}
- chan->netdev->trans_start = jiffies;
+ netif_trans_update(chan->netdev);
return skb_put(chan->rx_skb, size);
}
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 69b994f3b..3c9cbf908 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -831,7 +831,7 @@ fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
DMA_OWN | TX_STP | TX_ENP);
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
/*
@@ -1389,7 +1389,7 @@ do_bottom_half_tx(struct fst_card_info *card)
DMA_OWN | TX_STP | TX_ENP);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else {
/* Or do it through dma */
memcpy(card->tx_dma_handle_host,
@@ -2258,7 +2258,7 @@ fst_tx_timeout(struct net_device *dev)
card->card_no, port->index);
fst_issue_cmd(port, ABORTTX);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
port->start = 0;
}
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index bb33b242a..299140c04 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -2105,7 +2105,7 @@ static void lmc_driver_timeout(struct net_device *dev)
sc->lmc_device->stats.tx_errors++;
sc->extra_stats.tx_ProcTimeout++; /* -baz */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
bug_out:
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 8fef8d834..d98c7e571 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -860,9 +860,9 @@ prepare_to_send( struct sk_buff *skb, struct net_device *dev )
outb( inb( dev->base_addr + CSR0 ) | TR_REQ, dev->base_addr + CSR0 );
#ifdef CONFIG_SBNI_MULTILINE
- nl->master->trans_start = jiffies;
+ netif_trans_update(nl->master);
#else
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
#endif
}
@@ -889,10 +889,10 @@ drop_xmit_queue( struct net_device *dev )
nl->state &= ~(FL_WAIT_ACK | FL_NEED_RESEND);
#ifdef CONFIG_SBNI_MULTILINE
netif_start_queue( nl->master );
- nl->master->trans_start = jiffies;
+ netif_trans_update(nl->master);
#else
netif_start_queue( dev );
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
#endif
}
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index a9970f1af..bb74f4b9a 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -334,7 +334,7 @@ int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
i2400m, net_dev, skb);
/* FIXME: check eth hdr, only IPv4 is routed by the device as of now */
- net_dev->trans_start = jiffies;
+ netif_trans_update(net_dev);
i2400m_tx_prep_header(skb);
d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
skb, skb->len);
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index 15f057ed4..70ecd82d6 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -440,7 +440,7 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
rx_status.rate_idx = rate;
rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
- rx_status.band = IEEE80211_BAND_2GHZ;
+ rx_status.band = NL80211_BAND_2GHZ;
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(dev, skb);
@@ -1894,7 +1894,7 @@ static int adm8211_probe(struct pci_dev *pdev,
priv->channel = 1;
- dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
err = ieee80211_register_hw(dev);
if (err) {
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index ccfa825a0..425fa1d32 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1471,12 +1471,12 @@ static int ar5523_init_modes(struct ar5523 *ar)
memcpy(ar->channels, ar5523_channels, sizeof(ar5523_channels));
memcpy(ar->rates, ar5523_rates, sizeof(ar5523_rates));
- ar->band.band = IEEE80211_BAND_2GHZ;
+ ar->band.band = NL80211_BAND_2GHZ;
ar->band.channels = ar->channels;
ar->band.n_channels = ARRAY_SIZE(ar5523_channels);
ar->band.bitrates = ar->rates;
ar->band.n_bitrates = ARRAY_SIZE(ar5523_rates);
- ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &ar->band;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = &ar->band;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 65ef483eb..da7a7c8da 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -185,7 +185,7 @@ struct ath_common {
bool bt_ant_diversity;
int last_rssi;
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
};
static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index edf362928..9fb8d7472 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -411,7 +411,8 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
lockdep_assert_held(&ar_pci->ce_lock);
- if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ if ((pipe->id != 5) &&
+ CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
return -ENOSPC;
desc->addr = __cpu_to_le32(paddr);
@@ -425,6 +426,19 @@ int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
return 0;
}
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ u32 ctrl_addr = pipe->ctrl_addr;
+
+ write_index = CE_RING_IDX_ADD(nentries_mask, write_index, nentries);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+}
+
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
{
struct ath10k *ar = pipe->ar;
@@ -444,14 +458,10 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
*/
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
+ unsigned int *nbytesp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
- struct ath10k *ar = ce_state->ar;
unsigned int sw_index = dest_ring->sw_index;
struct ce_desc *base = dest_ring->base_addr_owner_space;
@@ -476,21 +486,17 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
desc->nbytes = 0;
/* Return data from completed destination descriptor */
- *bufferp = __le32_to_cpu(sdesc.addr);
*nbytesp = nbytes;
- *transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);
-
- if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
- *flagsp = CE_RECV_FLAG_SWAPPED;
- else
- *flagsp = 0;
if (per_transfer_contextp)
*per_transfer_contextp =
dest_ring->per_transfer_context[sw_index];
- /* sanity */
- dest_ring->per_transfer_context[sw_index] = NULL;
+ /* Copy engine 5 (HTT Rx) will reuse the same transfer context.
+ * So update the transfer context for all CEs except CE5.
+ */
+ if (ce_state->id != 5)
+ dest_ring->per_transfer_context[sw_index] = NULL;
/* Update sw_index */
sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
@@ -501,10 +507,7 @@ int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp)
+ unsigned int *nbytesp)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -513,8 +516,7 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
spin_lock_bh(&ar_pci->ce_lock);
ret = ath10k_ce_completed_recv_next_nolock(ce_state,
per_transfer_contextp,
- bufferp, nbytesp,
- transfer_idp, flagsp);
+ nbytesp);
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
@@ -1048,11 +1050,11 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
*
* For lack of a better place, do the check here.
*/
- BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- BUILD_BUG_ON(2*TARGET_TLV_NUM_MSDU_DESC >
+ BUILD_BUG_ON(2 * TARGET_TLV_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ce_state->ar = ar;
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 47b734ce7..dfc098606 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -22,7 +22,7 @@
/* Maximum number of Copy Engine's supported */
#define CE_COUNT_MAX 12
-#define CE_HTT_H2T_MSG_SRC_NENTRIES 4096
+#define CE_HTT_H2T_MSG_SRC_NENTRIES 8192
/* Descriptor rings must be aligned to this boundary */
#define CE_DESC_RING_ALIGN 8
@@ -166,6 +166,7 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe);
int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
+void ath10k_ce_rx_update_write_idx(struct ath10k_ce_pipe *pipe, u32 nentries);
/* recv flags */
/* Data is byte-swapped */
@@ -177,10 +178,7 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr);
*/
int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp);
+ unsigned int *nbytesp);
/*
* Supply data for the next completed unprocessed send descriptor.
* Pops 1 completed send buffer from Source ring.
@@ -212,10 +210,7 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
- u32 *bufferp,
- unsigned int *nbytesp,
- unsigned int *transfer_idp,
- unsigned int *flagsp);
+ unsigned int *nbytesp);
/*
* Support clean shutdown by allowing the caller to cancel
@@ -413,9 +408,11 @@ static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
/* Ring arithmetic (modulus number of entries in ring, which is a pwr of 2). */
#define CE_RING_DELTA(nentries_mask, fromidx, toidx) \
- (((int)(toidx)-(int)(fromidx)) & (nentries_mask))
+ (((int)(toidx) - (int)(fromidx)) & (nentries_mask))
#define CE_RING_IDX_INCR(nentries_mask, idx) (((idx) + 1) & (nentries_mask))
+#define CE_RING_IDX_ADD(nentries_mask, idx, num) \
+ (((idx) + (num)) & (nentries_mask))
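CE_RING_IDX_ADD relies on nentries being a power of two (as the comment above these macros notes), so masking with nentries - 1 gives the modulo wrap without a division. A tiny demonstration:

    #include <stdio.h>

    #define RING_ENTRIES 8  /* must be a power of two */
    #define RING_MASK    (RING_ENTRIES - 1)

    static unsigned int ring_idx_add(unsigned int idx, unsigned int num)
    {
        return (idx + num) & RING_MASK;
    }

    int main(void)
    {
        printf("%u\n", ring_idx_add(6, 3));  /* wraps: (6 + 3) % 8 == 1 */
        return 0;
    }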
#define CE_WRAPPER_INTERRUPT_SUMMARY_HOST_MSI_LSB \
ar->regs->ce_wrap_intr_sum_host_msi_lsb
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 3ede921da..697f8d054 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -60,10 +60,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+ .cal_data_len = 2116,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
- .fw = QCA988X_HW_2_0_FW_FILE,
- .otp = QCA988X_HW_2_0_OTP_FILE,
.board = QCA988X_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
@@ -78,10 +77,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
- .fw = QCA6174_HW_2_1_FW_FILE,
- .otp = QCA6174_HW_2_1_OTP_FILE,
.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -97,10 +95,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
- .fw = QCA6174_HW_2_1_FW_FILE,
- .otp = QCA6174_HW_2_1_OTP_FILE,
.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -116,10 +113,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
- .fw = QCA6174_HW_3_0_FW_FILE,
- .otp = QCA6174_HW_3_0_OTP_FILE,
.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -135,11 +131,10 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_AFTER,
+ .cal_data_len = 8124,
.fw = {
/* uses same binaries as hw3.0 */
.dir = QCA6174_HW_3_0_FW_DIR,
- .fw = QCA6174_HW_3_0_FW_FILE,
- .otp = QCA6174_HW_3_0_OTP_FILE,
.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
@@ -156,15 +151,12 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.channel_counters_freq_hz = 150000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
- .num_msdu_desc = 1424,
- .qcache_active_peers = 50,
.tx_chain_mask = 0xf,
.rx_chain_mask = 0xf,
.max_spatial_stream = 4,
+ .cal_data_len = 12064,
.fw = {
.dir = QCA99X0_HW_2_0_FW_DIR,
- .fw = QCA99X0_HW_2_0_FW_FILE,
- .otp = QCA99X0_HW_2_0_OTP_FILE,
.board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
@@ -179,10 +171,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .fw = QCA9377_HW_1_0_FW_FILE,
- .otp = QCA9377_HW_1_0_OTP_FILE,
.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
@@ -197,10 +188,9 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
+ .cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .fw = QCA9377_HW_1_0_FW_FILE,
- .otp = QCA9377_HW_1_0_OTP_FILE,
.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
@@ -212,20 +202,18 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca4019 hw1.0",
.patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .has_shifted_cc_wraparound = true,
.otp_exe_param = 0x0010000,
.continuous_frag_desc = true,
.channel_counters_freq_hz = 125000,
.max_probe_resp_desc_thres = 24,
.hw_4addr_pad = ATH10K_HW_4ADDR_PAD_BEFORE,
- .num_msdu_desc = 2500,
- .qcache_active_peers = 35,
.tx_chain_mask = 0x3,
.rx_chain_mask = 0x3,
.max_spatial_stream = 2,
+ .cal_data_len = 12064,
.fw = {
.dir = QCA4019_HW_1_0_FW_DIR,
- .fw = QCA4019_HW_1_0_FW_FILE,
- .otp = QCA4019_HW_1_0_OTP_FILE,
.board = QCA4019_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA4019_BOARD_DATA_SZ,
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
@@ -274,7 +262,7 @@ void ath10k_core_get_fw_features_str(struct ath10k *ar,
int i;
for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
- if (test_bit(i, ar->fw_features)) {
+ if (test_bit(i, ar->normal_mode_fw.fw_file.fw_features)) {
if (len > 0)
len += scnprintf(buf + len, buf_len - len, ",");
@@ -466,18 +454,18 @@ exit:
return ret;
}
-static int ath10k_download_cal_file(struct ath10k *ar)
+static int ath10k_download_cal_file(struct ath10k *ar,
+ const struct firmware *file)
{
int ret;
- if (!ar->cal_file)
+ if (!file)
return -ENOENT;
- if (IS_ERR(ar->cal_file))
- return PTR_ERR(ar->cal_file);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
- ret = ath10k_download_board_data(ar, ar->cal_file->data,
- ar->cal_file->size);
+ ret = ath10k_download_board_data(ar, file->data, file->size);
if (ret) {
ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
return ret;
@@ -488,7 +476,7 @@ static int ath10k_download_cal_file(struct ath10k *ar)
return 0;
}
-static int ath10k_download_cal_dt(struct ath10k *ar)
+static int ath10k_download_cal_dt(struct ath10k *ar, const char *dt_name)
{
struct device_node *node;
int data_len;
@@ -502,13 +490,12 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
*/
return -ENOENT;
- if (!of_get_property(node, "qcom,ath10k-calibration-data",
- &data_len)) {
+ if (!of_get_property(node, dt_name, &data_len)) {
/* The calibration data node is optional */
return -ENOENT;
}
- if (data_len != QCA988X_CAL_DATA_LEN) {
+ if (data_len != ar->hw_params.cal_data_len) {
ath10k_warn(ar, "invalid calibration data length in DT: %d\n",
data_len);
ret = -EMSGSIZE;
@@ -521,8 +508,7 @@ static int ath10k_download_cal_dt(struct ath10k *ar)
goto out;
}
- ret = of_property_read_u8_array(node, "qcom,ath10k-calibration-data",
- data, data_len);
+ ret = of_property_read_u8_array(node, dt_name, data, data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data from DT: %d\n",
ret);
@@ -553,7 +539,8 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
address = ar->hw_params.patch_load_addr;
- if (!ar->otp_data || !ar->otp_len) {
+ if (!ar->normal_mode_fw.fw_file.otp_data ||
+ !ar->normal_mode_fw.fw_file.otp_len) {
ath10k_warn(ar,
"failed to retrieve board id because of invalid otp\n");
return -ENODATA;
@@ -561,9 +548,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot upload otp to 0x%x len %zd for board id\n",
- address, ar->otp_len);
+ address, ar->normal_mode_fw.fw_file.otp_len);
- ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len);
if (ret) {
ath10k_err(ar, "could not write otp for board id check: %d\n",
ret);
@@ -601,7 +590,9 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
u32 bmi_otp_exe_param = ar->hw_params.otp_exe_param;
int ret;
- ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
+ ret = ath10k_download_board_data(ar,
+ ar->running_fw->board_data,
+ ar->running_fw->board_len);
if (ret) {
ath10k_err(ar, "failed to download board data: %d\n", ret);
return ret;
@@ -609,16 +600,20 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
/* OTP is optional */
- if (!ar->otp_data || !ar->otp_len) {
+ if (!ar->running_fw->fw_file.otp_data ||
+ !ar->running_fw->fw_file.otp_len) {
ath10k_warn(ar, "Not running otp, calibration will be incorrect (otp-data %p otp_len %zd)!\n",
- ar->otp_data, ar->otp_len);
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
return 0;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot upload otp to 0x%x len %zd\n",
- address, ar->otp_len);
+ address, ar->running_fw->fw_file.otp_len);
- ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
+ ret = ath10k_bmi_fast_download(ar, address,
+ ar->running_fw->fw_file.otp_data,
+ ar->running_fw->fw_file.otp_len);
if (ret) {
ath10k_err(ar, "could not write otp (%d)\n", ret);
return ret;
@@ -633,7 +628,7 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot otp execute result %d\n", result);
if (!(skip_otp || test_bit(ATH10K_FW_FEATURE_IGNORE_OTP_RESULT,
- ar->fw_features)) &&
+ ar->running_fw->fw_file.fw_features)) &&
result != 0) {
ath10k_err(ar, "otp calibration failed: %d", result);
return -EINVAL;
@@ -642,46 +637,32 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
return 0;
}
-static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
+static int ath10k_download_fw(struct ath10k *ar)
{
u32 address, data_len;
- const char *mode_name;
const void *data;
int ret;
address = ar->hw_params.patch_load_addr;
- switch (mode) {
- case ATH10K_FIRMWARE_MODE_NORMAL:
- data = ar->firmware_data;
- data_len = ar->firmware_len;
- mode_name = "normal";
- ret = ath10k_swap_code_seg_configure(ar,
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW);
- if (ret) {
- ath10k_err(ar, "failed to configure fw code swap: %d\n",
- ret);
- return ret;
- }
- break;
- case ATH10K_FIRMWARE_MODE_UTF:
- data = ar->testmode.utf_firmware_data;
- data_len = ar->testmode.utf_firmware_len;
- mode_name = "utf";
- break;
- default:
- ath10k_err(ar, "unknown firmware mode: %d\n", mode);
- return -EINVAL;
+ data = ar->running_fw->fw_file.firmware_data;
+ data_len = ar->running_fw->fw_file.firmware_len;
+
+ ret = ath10k_swap_code_seg_configure(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to configure fw code swap: %d\n",
+ ret);
+ return ret;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
- "boot uploading firmware image %p len %d mode %s\n",
- data, data_len, mode_name);
+ "boot uploading firmware image %p len %d\n",
+ data, data_len);
ret = ath10k_bmi_fast_download(ar, address, data, data_len);
if (ret) {
- ath10k_err(ar, "failed to download %s firmware: %d\n",
- mode_name, ret);
+ ath10k_err(ar, "failed to download firmware: %d\n",
+ ret);
return ret;
}
@@ -690,42 +671,50 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
static void ath10k_core_free_board_files(struct ath10k *ar)
{
- if (!IS_ERR(ar->board))
- release_firmware(ar->board);
+ if (!IS_ERR(ar->normal_mode_fw.board))
+ release_firmware(ar->normal_mode_fw.board);
- ar->board = NULL;
- ar->board_data = NULL;
- ar->board_len = 0;
+ ar->normal_mode_fw.board = NULL;
+ ar->normal_mode_fw.board_data = NULL;
+ ar->normal_mode_fw.board_len = 0;
}
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
- if (!IS_ERR(ar->otp))
- release_firmware(ar->otp);
-
- if (!IS_ERR(ar->firmware))
- release_firmware(ar->firmware);
+ if (!IS_ERR(ar->normal_mode_fw.fw_file.firmware))
+ release_firmware(ar->normal_mode_fw.fw_file.firmware);
if (!IS_ERR(ar->cal_file))
release_firmware(ar->cal_file);
+ if (!IS_ERR(ar->pre_cal_file))
+ release_firmware(ar->pre_cal_file);
+
ath10k_swap_code_seg_release(ar);
- ar->otp = NULL;
- ar->otp_data = NULL;
- ar->otp_len = 0;
+ ar->normal_mode_fw.fw_file.otp_data = NULL;
+ ar->normal_mode_fw.fw_file.otp_len = 0;
- ar->firmware = NULL;
- ar->firmware_data = NULL;
- ar->firmware_len = 0;
+ ar->normal_mode_fw.fw_file.firmware = NULL;
+ ar->normal_mode_fw.fw_file.firmware_data = NULL;
+ ar->normal_mode_fw.fw_file.firmware_len = 0;
ar->cal_file = NULL;
+ ar->pre_cal_file = NULL;
}
static int ath10k_fetch_cal_file(struct ath10k *ar)
{
char filename[100];
+ /* pre-cal-<bus>-<id>.bin */
+ scnprintf(filename, sizeof(filename), "/*(DEBLOBBED)*/",
+ ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
+
+ ar->pre_cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
+ if (!IS_ERR(ar->pre_cal_file))
+ goto success;
+
/*(DEBLOBBED)*/
scnprintf(filename, sizeof(filename), "/*(DEBLOBBED)*/",
ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
@@ -734,7 +723,7 @@ static int ath10k_fetch_cal_file(struct ath10k *ar)
if (IS_ERR(ar->cal_file))
/* calibration file is optional, don't print any warnings */
return PTR_ERR(ar->cal_file);
-
+success:
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
ATH10K_FW_DIR, filename);
@@ -748,14 +737,14 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar)
return -EINVAL;
}
- ar->board = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(ar->board))
- return PTR_ERR(ar->board);
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
- ar->board_data = ar->board->data;
- ar->board_len = ar->board->size;
+ ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
+ ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
return 0;
}
@@ -815,8 +804,8 @@ static int ath10k_core_parse_bd_ie_board(struct ath10k *ar,
"boot found board data for '%s'",
boardname);
- ar->board_data = board_ie_data;
- ar->board_len = board_ie_len;
+ ar->normal_mode_fw.board_data = board_ie_data;
+ ar->normal_mode_fw.board_len = board_ie_len;
ret = 0;
goto out;
@@ -849,12 +838,14 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
const u8 *data;
int ret, ie_id;
- ar->board = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, filename);
- if (IS_ERR(ar->board))
- return PTR_ERR(ar->board);
+ ar->normal_mode_fw.board = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ filename);
+ if (IS_ERR(ar->normal_mode_fw.board))
+ return PTR_ERR(ar->normal_mode_fw.board);
- data = ar->board->data;
- len = ar->board->size;
+ data = ar->normal_mode_fw.board->data;
+ len = ar->normal_mode_fw.board->size;
/* magic has extra null byte padded */
magic_len = strlen(ATH10K_BOARD_MAGIC) + 1;
@@ -921,7 +912,7 @@ static int ath10k_core_fetch_board_data_api_n(struct ath10k *ar,
}
out:
- if (!ar->board_data || !ar->board_len) {
+ if (!ar->normal_mode_fw.board_data || !ar->normal_mode_fw.board_len) {
ath10k_err(ar,
"failed to fetch board data for %s from %s/%s\n",
boardname, ar->hw_params.fw.dir, filename);
@@ -989,51 +980,8 @@ success:
return 0;
}
-static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
-{
- int ret = 0;
-
- if (ar->hw_params.fw.fw == NULL) {
- ath10k_err(ar, "firmware file not defined\n");
- return -EINVAL;
- }
-
- ar->firmware = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.fw);
- if (IS_ERR(ar->firmware)) {
- ret = PTR_ERR(ar->firmware);
- ath10k_err(ar, "could not fetch firmware (%d)\n", ret);
- goto err;
- }
-
- ar->firmware_data = ar->firmware->data;
- ar->firmware_len = ar->firmware->size;
-
- /* OTP may be undefined. If so, don't fetch it at all */
- if (ar->hw_params.fw.otp == NULL)
- return 0;
-
- ar->otp = ath10k_fetch_fw_file(ar,
- ar->hw_params.fw.dir,
- ar->hw_params.fw.otp);
- if (IS_ERR(ar->otp)) {
- ret = PTR_ERR(ar->otp);
- ath10k_err(ar, "could not fetch otp (%d)\n", ret);
- goto err;
- }
-
- ar->otp_data = ar->otp->data;
- ar->otp_len = ar->otp->size;
-
- return 0;
-
-err:
- ath10k_core_free_firmware_files(ar);
- return ret;
-}
-
-static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+ struct ath10k_fw_file *fw_file)
{
size_t magic_len, len, ie_len;
int ie_id, i, index, bit, ret;
@@ -1042,15 +990,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
__le32 *timestamp, *version;
/* first fetch the firmware file (firmware-*.bin) */
- ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
- if (IS_ERR(ar->firmware)) {
+ fw_file->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
+ name);
+ if (IS_ERR(fw_file->firmware)) {
ath10k_err(ar, "could not fetch firmware file '%s/%s': %ld\n",
- ar->hw_params.fw.dir, name, PTR_ERR(ar->firmware));
- return PTR_ERR(ar->firmware);
+ ar->hw_params.fw.dir, name,
+ PTR_ERR(fw_file->firmware));
+ return PTR_ERR(fw_file->firmware);
}
- data = ar->firmware->data;
- len = ar->firmware->size;
+ data = fw_file->firmware->data;
+ len = fw_file->firmware->size;
/* magic also includes the null byte, check that as well */
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
@@ -1093,15 +1043,15 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
switch (ie_id) {
case ATH10K_FW_IE_FW_VERSION:
- if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
+ if (ie_len > sizeof(fw_file->fw_version) - 1)
break;
- memcpy(ar->hw->wiphy->fw_version, data, ie_len);
- ar->hw->wiphy->fw_version[ie_len] = '\0';
+ memcpy(fw_file->fw_version, data, ie_len);
+ fw_file->fw_version[ie_len] = '\0';
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw version %s\n",
- ar->hw->wiphy->fw_version);
+ fw_file->fw_version);
break;
case ATH10K_FW_IE_TIMESTAMP:
if (ie_len != sizeof(u32))
@@ -1128,21 +1078,21 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"Enabling feature bit: %i\n",
i);
- __set_bit(i, ar->fw_features);
+ __set_bit(i, fw_file->fw_features);
}
}
ath10k_dbg_dump(ar, ATH10K_DBG_BOOT, "features", "",
- ar->fw_features,
- sizeof(ar->fw_features));
+ fw_file->fw_features,
+ sizeof(fw_file->fw_features));
break;
case ATH10K_FW_IE_FW_IMAGE:
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw image ie (%zd B)\n",
ie_len);
- ar->firmware_data = data;
- ar->firmware_len = ie_len;
+ fw_file->firmware_data = data;
+ fw_file->firmware_len = ie_len;
break;
case ATH10K_FW_IE_OTP_IMAGE:
@@ -1150,8 +1100,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
"found otp image ie (%zd B)\n",
ie_len);
- ar->otp_data = data;
- ar->otp_len = ie_len;
+ fw_file->otp_data = data;
+ fw_file->otp_len = ie_len;
break;
case ATH10K_FW_IE_WMI_OP_VERSION:
@@ -1160,10 +1110,10 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
version = (__le32 *)data;
- ar->wmi.op_version = le32_to_cpup(version);
+ fw_file->wmi_op_version = le32_to_cpup(version);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie wmi op version %d\n",
- ar->wmi.op_version);
+ fw_file->wmi_op_version);
break;
case ATH10K_FW_IE_HTT_OP_VERSION:
if (ie_len != sizeof(u32))
@@ -1171,17 +1121,17 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
version = (__le32 *)data;
- ar->htt.op_version = le32_to_cpup(version);
+ fw_file->htt_op_version = le32_to_cpup(version);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found fw ie htt op version %d\n",
- ar->htt.op_version);
+ fw_file->htt_op_version);
break;
case ATH10K_FW_IE_FW_CODE_SWAP_IMAGE:
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"found fw code swap image ie (%zd B)\n",
ie_len);
- ar->swap.firmware_codeswap_data = data;
- ar->swap.firmware_codeswap_len = ie_len;
+ fw_file->codeswap_data = data;
+ fw_file->codeswap_len = ie_len;
break;
default:
ath10k_warn(ar, "Unknown FW IE: %u\n",
@@ -1196,7 +1146,8 @@ static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
data += ie_len;
}
- if (!ar->firmware_data || !ar->firmware_len) {
+ if (!fw_file->firmware_data ||
+ !fw_file->firmware_len) {
ath10k_warn(ar, "No ATH10K_FW_IE_FW_IMAGE found from '%s/%s', skipping\n",
ar->hw_params.fw.dir, name);
ret = -ENOMEDIUM;
@@ -1220,40 +1171,95 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
ar->fw_api = 5;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API5_FILE,
+ &ar->normal_mode_fw.fw_file);
if (ret == 0)
goto success;
ar->fw_api = 4;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API4_FILE,
+ &ar->normal_mode_fw.fw_file);
if (ret == 0)
goto success;
ar->fw_api = 3;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API3_FILE,
+ &ar->normal_mode_fw.fw_file);
if (ret == 0)
goto success;
ar->fw_api = 2;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
- ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
- if (ret == 0)
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE,
+ &ar->normal_mode_fw.fw_file);
+ if (ret)
+ return ret;
+
+success:
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+
+ return 0;
+}
+
+static int ath10k_core_pre_cal_download(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_download_cal_file(ar, ar->pre_cal_file);
+ if (ret == 0) {
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_FILE;
goto success;
+ }
- ar->fw_api = 1;
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "boot did not find a pre calibration file, try DT next: %d\n",
+ ret);
- ret = ath10k_core_fetch_firmware_api_1(ar);
- if (ret)
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-pre-calibration-data");
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "unable to load pre cal data from DT: %d\n", ret);
return ret;
+ }
+ ar->cal_mode = ATH10K_PRE_CAL_MODE_DT;
success:
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
+ ath10k_cal_mode_str(ar->cal_mode));
+
+ return 0;
+}
+
+static int ath10k_core_pre_cal_config(struct ath10k *ar)
+{
+ int ret;
+
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "failed to load pre cal data: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_core_get_board_id_from_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to get board id: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath10k_download_and_run_otp(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to run otp: %d\n", ret);
+ return ret;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "pre cal configuration done successfully\n");
return 0;
}
@@ -1262,7 +1268,15 @@ static int ath10k_download_cal_data(struct ath10k *ar)
{
int ret;
- ret = ath10k_download_cal_file(ar);
+ ret = ath10k_core_pre_cal_config(ar);
+ if (ret == 0)
+ return 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "pre cal download procedure failed, try cal file: %d\n",
+ ret);
+
+ ret = ath10k_download_cal_file(ar, ar->cal_file);
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_FILE;
goto done;
@@ -1272,7 +1286,7 @@ static int ath10k_download_cal_data(struct ath10k *ar)
"boot did not find a calibration file, try DT next: %d\n",
ret);
- ret = ath10k_download_cal_dt(ar);
+ ret = ath10k_download_cal_dt(ar, "qcom,ath10k-calibration-data");
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_DT;
goto done;
@@ -1383,6 +1397,7 @@ static void ath10k_core_restart(struct work_struct *work)
complete_all(&ar->install_key_done);
complete_all(&ar->vdev_setup_done);
complete_all(&ar->thermal.wmi_sync);
+ complete_all(&ar->bss_survey_done);
wake_up(&ar->htt.empty_tx_wq);
wake_up(&ar->wmi.tx_credits_wq);
wake_up(&ar->peer_mapping_wq);
@@ -1420,15 +1435,17 @@ static void ath10k_core_restart(struct work_struct *work)
static int ath10k_core_init_firmware_features(struct ath10k *ar)
{
- if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features) &&
- !test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ struct ath10k_fw_file *fw_file = &ar->normal_mode_fw.fw_file;
+
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, fw_file->fw_features) &&
+ !test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
ath10k_err(ar, "feature bits corrupted: 10.2 feature requires 10.x feature to be set as well");
return -EINVAL;
}
- if (ar->wmi.op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
+ if (fw_file->wmi_op_version >= ATH10K_FW_WMI_OP_VERSION_MAX) {
ath10k_err(ar, "unsupported WMI OP version (max %d): %d\n",
- ATH10K_FW_WMI_OP_VERSION_MAX, ar->wmi.op_version);
+ ATH10K_FW_WMI_OP_VERSION_MAX, fw_file->wmi_op_version);
return -EINVAL;
}
@@ -1440,7 +1457,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
break;
case ATH10K_CRYPT_MODE_SW:
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
- ar->fw_features)) {
+ fw_file->fw_features)) {
ath10k_err(ar, "cryptmode > 0 requires raw mode support from firmware");
return -EINVAL;
}
@@ -1459,7 +1476,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
if (rawmode) {
if (!test_bit(ATH10K_FW_FEATURE_RAW_MODE_SUPPORT,
- ar->fw_features)) {
+ fw_file->fw_features)) {
ath10k_err(ar, "rawmode = 1 requires support from firmware");
return -EINVAL;
}
@@ -1484,19 +1501,19 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_WMI_OP_VERSION.
*/
- if (ar->wmi.op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
- if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
+ if (fw_file->wmi_op_version == ATH10K_FW_WMI_OP_VERSION_UNSET) {
+ if (test_bit(ATH10K_FW_FEATURE_WMI_10X, fw_file->fw_features)) {
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2,
- ar->fw_features))
- ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
+ fw_file->fw_features))
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_2;
else
- ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
} else {
- ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_MAIN;
}
}
- switch (ar->wmi.op_version) {
+ switch (fw_file->wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->max_num_peers = TARGET_NUM_PEERS;
ar->max_num_stations = TARGET_NUM_STATIONS;
@@ -1509,7 +1526,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+ if (ath10k_peer_stats_enabled(ar)) {
ar->max_num_peers = TARGET_10X_TX_STATS_NUM_PEERS;
ar->max_num_stations = TARGET_10X_TX_STATS_NUM_STATIONS;
} else {
@@ -1538,9 +1555,15 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
ar->num_active_peers = TARGET_10_4_ACTIVE_PEERS;
ar->max_num_vdevs = TARGET_10_4_NUM_VDEVS;
ar->num_tids = TARGET_10_4_TGT_NUM_TIDS;
- ar->htt.max_num_pending_tx = ar->hw_params.num_msdu_desc;
- ar->fw_stats_req_mask = WMI_STAT_PEER;
+ ar->fw_stats_req_mask = WMI_10_4_STAT_PEER |
+ WMI_10_4_STAT_PEER_EXTD;
ar->max_spatial_stream = ar->hw_params.max_spatial_stream;
+
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ fw_file->fw_features))
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC_PFC;
+ else
+ ar->htt.max_num_pending_tx = TARGET_10_4_NUM_MSDU_DESC;
break;
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
@@ -1551,18 +1574,18 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
/* Backwards compatibility for firmwares without
* ATH10K_FW_IE_HTT_OP_VERSION.
*/
- if (ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
- switch (ar->wmi.op_version) {
+ if (fw_file->htt_op_version == ATH10K_FW_HTT_OP_VERSION_UNSET) {
+ switch (fw_file->wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
- ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_MAIN;
break;
case ATH10K_FW_WMI_OP_VERSION_10_1:
case ATH10K_FW_WMI_OP_VERSION_10_2:
case ATH10K_FW_WMI_OP_VERSION_10_2_4:
- ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
break;
case ATH10K_FW_WMI_OP_VERSION_TLV:
- ar->htt.op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_TLV;
break;
case ATH10K_FW_WMI_OP_VERSION_10_4:
case ATH10K_FW_WMI_OP_VERSION_UNSET:
@@ -1575,14 +1598,18 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
return 0;
}
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ const struct ath10k_fw_components *fw)
{
int status;
+ u32 val;
lockdep_assert_held(&ar->conf_mutex);
clear_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags);
+ ar->running_fw = fw;
+
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
@@ -1601,7 +1628,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
* to set the clock source once the target is initialized.
*/
if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT,
- ar->fw_features)) {
+ ar->running_fw->fw_file.fw_features)) {
status = ath10k_bmi_write32(ar, hi_skip_clock_init, 1);
if (status) {
ath10k_err(ar, "could not write to skip_clock_init: %d\n",
@@ -1610,7 +1637,7 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
}
}
- status = ath10k_download_fw(ar, mode);
+ status = ath10k_download_fw(ar);
if (status)
goto err;
@@ -1698,6 +1725,23 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "firmware %s booted\n",
ar->hw->wiphy->fw_version);
+ if (test_bit(WMI_SERVICE_EXT_RES_CFG_SUPPORT, ar->wmi.svc_map)) {
+ val = 0;
+ if (ath10k_peer_stats_enabled(ar))
+ val = WMI_10_4_PEER_STATS;
+
+ if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+ val |= WMI_10_4_BSS_CHANNEL_INFO_64;
+
+ status = ath10k_mac_ext_resource_config(ar, val);
+ if (status) {
+ ath10k_err(ar,
+ "failed to send ext resource cfg command : %d\n",
+ status);
+ goto err_hif_stop;
+ }
+ }
+
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err(ar, "could not send WMI init command (%d)\n",
@@ -1832,13 +1876,27 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
goto err_power_down;
}
+ BUILD_BUG_ON(sizeof(ar->hw->wiphy->fw_version) !=
+ sizeof(ar->normal_mode_fw.fw_file.fw_version));
+ memcpy(ar->hw->wiphy->fw_version, ar->normal_mode_fw.fw_file.fw_version,
+ sizeof(ar->hw->wiphy->fw_version));
+
ath10k_debug_print_hwfw_info(ar);
+ ret = ath10k_core_pre_cal_download(ar);
+ if (ret) {
+ /* pre calibration data download is not necessary
+ * for all the chipsets. Ignore failures and continue.
+ */
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
+ "could not load pre cal data: %d\n", ret);
+ }
+
ret = ath10k_core_get_board_id_from_otp(ar);
if (ret && ret != -EOPNOTSUPP) {
ath10k_err(ar, "failed to get board id from otp: %d\n",
ret);
- return ret;
+ goto err_free_firmware_files;
}
ret = ath10k_core_fetch_board_file(ar);
@@ -1865,7 +1923,8 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
mutex_lock(&ar->conf_mutex);
- ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+ &ar->normal_mode_fw);
if (ret) {
ath10k_err(ar, "could not init core (%d)\n", ret);
goto err_unlock;
@@ -2035,6 +2094,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
init_completion(&ar->install_key_done);
init_completion(&ar->vdev_setup_done);
init_completion(&ar->thermal.wmi_sync);
+ init_completion(&ar->bss_survey_done);
INIT_DELAYED_WORK(&ar->scan.timeout, ath10k_scan_timeout_work);
@@ -2048,7 +2108,9 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
mutex_init(&ar->conf_mutex);
spin_lock_init(&ar->data_lock);
+ spin_lock_init(&ar->txqs_lock);
+ INIT_LIST_HEAD(&ar->txqs);
INIT_LIST_HEAD(&ar->peers);
init_waitqueue_head(&ar->peer_mapping_wq);
init_waitqueue_head(&ar->htt.empty_tx_wq);
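Editor's note: the core.c changes above rearrange calibration loading into a fallback chain: a pre-calibration file, then pre-calibration data from the device tree, then a calibration file, then DT calibration data, and finally the OTP-embedded calibration. A minimal sketch of that ordering, with stubbed helpers standing in for ath10k_download_cal_file()/ath10k_download_cal_dt(); the stub and enum names are illustrative only:

#include <errno.h>
#include <stdio.h>

enum cal_mode { CAL_PRE_FILE, CAL_PRE_DT, CAL_FILE, CAL_DT, CAL_OTP };

/* Each stub returns 0 on success or -ENOENT when that calibration
 * source is absent, mirroring how the driver's helpers report it.
 */
static int try_pre_cal_file(void) { return -ENOENT; }
static int try_pre_cal_dt(void)   { return -ENOENT; }
static int try_cal_file(void)     { return -ENOENT; }
static int try_cal_dt(void)       { return -ENOENT; }

static enum cal_mode download_cal_data(void)
{
	if (try_pre_cal_file() == 0)
		return CAL_PRE_FILE;
	if (try_pre_cal_dt() == 0)
		return CAL_PRE_DT;
	if (try_cal_file() == 0)
		return CAL_FILE;
	if (try_cal_dt() == 0)
		return CAL_DT;
	/* Nothing external found: fall back to calibration run from OTP. */
	return CAL_OTP;
}

int main(void)
{
	printf("cal mode = %d\n", download_cal_data());
	return 0;
}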
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index a62b62a62..1852e0ee3 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -44,8 +44,8 @@
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
-#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
-#define ATH10K_CONNECTION_LOSS_HZ (3*HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
#define ATH10K_NUM_CHANS 39
/* Antenna noise floor */
@@ -98,6 +98,7 @@ struct ath10k_skb_cb {
u8 eid;
u16 msdu_id;
struct ieee80211_vif *vif;
+ struct ieee80211_txq *txq;
} __packed;
struct ath10k_skb_rxcb {
@@ -138,7 +139,6 @@ struct ath10k_mem_chunk {
};
struct ath10k_wmi {
- enum ath10k_fw_wmi_op_version op_version;
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
@@ -297,6 +297,9 @@ struct ath10k_dfs_stats {
struct ath10k_peer {
struct list_head list;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+
int vdev_id;
u8 addr[ETH_ALEN];
DECLARE_BITMAP(peer_ids, ATH10K_MAX_NUM_PEER_IDS);
@@ -305,6 +308,12 @@ struct ath10k_peer {
struct ieee80211_key_conf *keys[WMI_MAX_KEY_INDEX + 1];
};
+struct ath10k_txq {
+ struct list_head list;
+ unsigned long num_fw_queued;
+ unsigned long num_push_allowed;
+};
+
struct ath10k_sta {
struct ath10k_vif *arvif;
@@ -313,6 +322,7 @@ struct ath10k_sta {
u32 bw;
u32 nss;
u32 smps;
+ u16 peer_id;
struct work_struct update_wk;
@@ -323,7 +333,7 @@ struct ath10k_sta {
#endif
};
-#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
+#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5 * HZ)
enum ath10k_beacon_state {
ATH10K_BEACON_SCHEDULED = 0,
@@ -335,6 +345,7 @@ struct ath10k_vif {
struct list_head list;
u32 vdev_id;
+ u16 peer_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
u32 beacon_interval;
@@ -549,12 +560,17 @@ enum ath10k_dev_flags {
/* Bluetooth coexistence enabled */
ATH10K_FLAG_BTCOEX,
+
+ /* Per Station statistics service */
+ ATH10K_FLAG_PEER_STATS,
};
enum ath10k_cal_mode {
ATH10K_CAL_MODE_FILE,
ATH10K_CAL_MODE_OTP,
ATH10K_CAL_MODE_DT,
+ ATH10K_PRE_CAL_MODE_FILE,
+ ATH10K_PRE_CAL_MODE_DT,
};
enum ath10k_crypt_mode {
@@ -573,6 +589,10 @@ static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
return "otp";
case ATH10K_CAL_MODE_DT:
return "dt";
+ case ATH10K_PRE_CAL_MODE_FILE:
+ return "pre-cal-file";
+ case ATH10K_PRE_CAL_MODE_DT:
+ return "pre-cal-dt";
}
return "unknown";
@@ -606,6 +626,34 @@ enum ath10k_tx_pause_reason {
ATH10K_TX_PAUSE_MAX,
};
+struct ath10k_fw_file {
+ const struct firmware *firmware;
+
+ char fw_version[ETHTOOL_FWVERS_LEN];
+
+ DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
+
+ enum ath10k_fw_wmi_op_version wmi_op_version;
+ enum ath10k_fw_htt_op_version htt_op_version;
+
+ const void *firmware_data;
+ size_t firmware_len;
+
+ const void *otp_data;
+ size_t otp_len;
+
+ const void *codeswap_data;
+ size_t codeswap_len;
+};
+
+struct ath10k_fw_components {
+ const struct firmware *board;
+ const void *board_data;
+ size_t board_len;
+
+ struct ath10k_fw_file fw_file;
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -631,8 +679,6 @@ struct ath10k {
/* protected by conf_mutex */
bool ani_enabled;
- DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
-
bool p2p;
struct {
@@ -680,39 +726,31 @@ struct ath10k {
/* The padding bytes' location is different on various chips */
enum ath10k_hw_4addr_pad hw_4addr_pad;
- u32 num_msdu_desc;
- u32 qcache_active_peers;
u32 tx_chain_mask;
u32 rx_chain_mask;
u32 max_spatial_stream;
+ u32 cal_data_len;
struct ath10k_hw_params_fw {
const char *dir;
- const char *fw;
- const char *otp;
const char *board;
size_t board_size;
size_t board_ext_size;
} fw;
} hw_params;
- const struct firmware *board;
- const void *board_data;
- size_t board_len;
-
- const struct firmware *otp;
- const void *otp_data;
- size_t otp_len;
+ /* contains the firmware images used with ATH10K_FIRMWARE_MODE_NORMAL */
+ struct ath10k_fw_components normal_mode_fw;
- const struct firmware *firmware;
- const void *firmware_data;
- size_t firmware_len;
+ /* READ-ONLY images of the running firmware, which can be either
+ * normal or UTF. Do not modify, release etc!
+ */
+ const struct ath10k_fw_components *running_fw;
+ const struct firmware *pre_cal_file;
const struct firmware *cal_file;
struct {
- const void *firmware_codeswap_data;
- size_t firmware_codeswap_len;
struct ath10k_swap_code_seg_info *firmware_swap_code_seg_info;
} swap;
@@ -744,7 +782,7 @@ struct ath10k {
} scan;
struct {
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
} mac;
/* should never be NULL; needed for regular htt rx */
@@ -756,6 +794,9 @@ struct ath10k {
/* current operating channel definition */
struct cfg80211_chan_def chandef;
+ /* currently configured operating channel in firmware */
+ struct ieee80211_channel *tgt_oper_chan;
+
unsigned long long free_vdev_map;
struct ath10k_vif *monitor_arvif;
bool monitor;
@@ -786,9 +827,13 @@ struct ath10k {
/* protects shared structure data */
spinlock_t data_lock;
+ /* protects: ar->txqs, artxq->list */
+ spinlock_t txqs_lock;
+ struct list_head txqs;
struct list_head arvifs;
struct list_head peers;
+ struct ath10k_peer *peer_map[ATH10K_MAX_NUM_PEER_IDS];
wait_queue_head_t peer_mapping_wq;
/* protected by conf_mutex */
@@ -831,6 +876,7 @@ struct ath10k {
* avoid reporting garbage data.
*/
bool ch_info_can_report_survey;
+ struct completion bss_survey_done;
struct dfs_pattern_detector *dfs_detector;
@@ -838,8 +884,6 @@ struct ath10k {
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
-#endif
-
struct {
/* relay(fs) channel for spectral scan */
struct rchan *rfs_chan_spec_scan;
@@ -848,16 +892,12 @@ struct ath10k {
enum ath10k_spectral_mode mode;
struct ath10k_spec_scan config;
} spectral;
+#endif
struct {
/* protected by conf_mutex */
- const struct firmware *utf;
- char utf_version[32];
- const void *utf_firmware_data;
- size_t utf_firmware_len;
- DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
- enum ath10k_fw_wmi_op_version orig_wmi_op_version;
- enum ath10k_fw_wmi_op_version op_version;
+ struct ath10k_fw_components utf_mode_fw;
+
/* protected by data_lock */
bool utf_monitor;
} testmode;
@@ -876,6 +916,15 @@ struct ath10k {
u8 drv_priv[0] __aligned(sizeof(void *));
};
+static inline bool ath10k_peer_stats_enabled(struct ath10k *ar)
+{
+ if (test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) &&
+ test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ return true;
+
+ return false;
+}
+
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
enum ath10k_hw_rev hw_rev,
@@ -884,8 +933,11 @@ void ath10k_core_destroy(struct ath10k *ar);
void ath10k_core_get_fw_features_str(struct ath10k *ar,
char *buf,
size_t max_len);
+int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name,
+ struct ath10k_fw_file *fw_file);
-int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode);
+int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
+ const struct ath10k_fw_components *fw_components);
int ath10k_wait_for_suspend(struct ath10k *ar, u32 suspend_opt);
void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar, u32 chip_id);
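Editor's note: the core.h hunks above fold the previously loose ar->firmware*/otp*/board* fields into struct ath10k_fw_file and struct ath10k_fw_components, so normal mode and UTF (testmode) keep separate image sets while ar->running_fw points read-only at whichever set is active. A simplified userspace mirror of that layout (field set trimmed for brevity):

#include <stddef.h>
#include <stdio.h>

struct fw_file {
	const void *firmware_data;
	size_t firmware_len;
	const void *otp_data;
	size_t otp_len;
};

struct fw_components {
	const void *board_data;
	size_t board_len;
	struct fw_file fw_file;
};

struct dev_state {
	struct fw_components normal_mode_fw;
	struct fw_components utf_mode_fw;
	/* Points at one of the two sets above; never modified through this. */
	const struct fw_components *running_fw;
};

int main(void)
{
	static const char image[] = "fw";
	struct dev_state ar = { 0 };

	ar.normal_mode_fw.fw_file.firmware_data = image;
	ar.normal_mode_fw.fw_file.firmware_len = sizeof(image);
	ar.running_fw = &ar.normal_mode_fw;

	printf("running image len %zu\n", ar.running_fw->fw_file.firmware_len);
	return 0;
}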
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0f834646e..e2511550f 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -126,7 +126,9 @@ EXPORT_SYMBOL(ath10k_info);
void ath10k_debug_print_hwfw_info(struct ath10k *ar)
{
+ const struct firmware *firmware;
char fw_features[128] = {};
+ u32 crc = 0;
ath10k_core_get_fw_features_str(ar, fw_features, sizeof(fw_features));
@@ -143,11 +145,15 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
config_enabled(CONFIG_NL80211_TESTMODE));
+ firmware = ar->normal_mode_fw.fw_file.firmware;
+ if (firmware)
+ crc = crc32_le(0, firmware->data, firmware->size);
+
ath10k_info(ar, "firmware ver %s api %d features %s crc32 %08x\n",
ar->hw->wiphy->fw_version,
ar->fw_api,
fw_features,
- crc32_le(0, ar->firmware->data, ar->firmware->size));
+ crc);
}
void ath10k_debug_print_board_info(struct ath10k *ar)
@@ -163,7 +169,8 @@ void ath10k_debug_print_board_info(struct ath10k *ar)
ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
ar->bd_api,
boardinfo,
- crc32_le(0, ar->board->data, ar->board->size));
+ crc32_le(0, ar->normal_mode_fw.board->data,
+ ar->normal_mode_fw.board->size));
}
void ath10k_debug_print_boot_info(struct ath10k *ar)
@@ -171,8 +178,8 @@ void ath10k_debug_print_boot_info(struct ath10k *ar)
ath10k_info(ar, "htt-ver %d.%d wmi-op %d htt-op %d cal %s max-sta %d raw %d hwcrypto %d\n",
ar->htt.target_version_major,
ar->htt.target_version_minor,
- ar->wmi.op_version,
- ar->htt.op_version,
+ ar->normal_mode_fw.fw_file.wmi_op_version,
+ ar->normal_mode_fw.fw_file.htt_op_version,
ath10k_cal_mode_str(ar->cal_mode),
ar->max_num_stations,
test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags),
@@ -319,7 +326,7 @@ static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_fw_stats stats = {};
- bool is_start, is_started, is_end, peer_stats_svc;
+ bool is_start, is_started, is_end;
size_t num_peers;
size_t num_vdevs;
int ret;
@@ -346,13 +353,11 @@ void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
* b) consume stat update events until another one with pdev stats is
* delivered which is treated as end-of-data and is itself discarded
*/
-
- peer_stats_svc = test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map);
- if (peer_stats_svc)
+ if (ath10k_peer_stats_enabled(ar))
ath10k_sta_update_rx_duration(ar, &stats.peers);
if (ar->debug.fw_stats_done) {
- if (!peer_stats_svc)
+ if (!ath10k_peer_stats_enabled(ar))
ath10k_warn(ar, "received unsolicited stats update event\n");
goto free;
@@ -1447,7 +1452,7 @@ static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
goto err;
}
- buf = vmalloc(QCA988X_CAL_DATA_LEN);
+ buf = vmalloc(ar->hw_params.cal_data_len);
if (!buf) {
ret = -ENOMEM;
goto err;
@@ -1462,7 +1467,7 @@ static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
}
ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
- QCA988X_CAL_DATA_LEN);
+ ar->hw_params.cal_data_len);
if (ret) {
ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
goto err_vfree;
@@ -1487,10 +1492,11 @@ static ssize_t ath10k_debug_cal_data_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
+ struct ath10k *ar = file->private_data;
void *buf = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos,
- buf, QCA988X_CAL_DATA_LEN);
+ buf, ar->hw_params.cal_data_len);
}
static int ath10k_debug_cal_data_release(struct inode *inode,
@@ -2119,7 +2125,7 @@ static ssize_t ath10k_write_btcoex(struct file *file,
struct ath10k *ar = file->private_data;
char buf[32];
size_t buf_size;
- int ret = 0;
+ int ret;
bool val;
buf_size = min(count, (sizeof(buf) - 1));
@@ -2139,8 +2145,10 @@ static ssize_t ath10k_write_btcoex(struct file *file,
goto exit;
}
- if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val))
+ if (!(test_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags) ^ val)) {
+ ret = count;
goto exit;
+ }
if (val)
set_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
@@ -2179,6 +2187,75 @@ static const struct file_operations fops_btcoex = {
.open = simple_open
};
+static ssize_t ath10k_write_peer_stats(struct file *file,
+ const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32];
+ size_t buf_size;
+ int ret;
+ bool val;
+
+ buf_size = min(count, (sizeof(buf) - 1));
+ if (copy_from_user(buf, ubuf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+
+ if (strtobool(buf, &val) != 0)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ if (!(test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags) ^ val)) {
+ ret = count;
+ goto exit;
+ }
+
+ if (val)
+ set_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+ else
+ clear_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags);
+
+ ath10k_info(ar, "restarting firmware due to Peer stats change");
+
+ queue_work(ar->workqueue, &ar->restart_work);
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static ssize_t ath10k_read_peer_stats(struct file *file, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ char buf[32];
+ struct ath10k *ar = file->private_data;
+ int len = 0;
+
+ mutex_lock(&ar->conf_mutex);
+ len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+ test_bit(ATH10K_FLAG_PEER_STATS, &ar->dev_flags));
+ mutex_unlock(&ar->conf_mutex);
+
+ return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_peer_stats = {
+ .read = ath10k_read_peer_stats,
+ .write = ath10k_write_peer_stats,
+ .open = simple_open
+};
+
static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
@@ -2196,23 +2273,28 @@ static ssize_t ath10k_debug_fw_checksums_read(struct file *file,
len += scnprintf(buf + len, buf_len - len,
"firmware-N.bin\t\t%08x\n",
- crc32_le(0, ar->firmware->data, ar->firmware->size));
+ crc32_le(0, ar->normal_mode_fw.fw_file.firmware->data,
+ ar->normal_mode_fw.fw_file.firmware->size));
len += scnprintf(buf + len, buf_len - len,
"athwlan\t\t\t%08x\n",
- crc32_le(0, ar->firmware_data, ar->firmware_len));
+ crc32_le(0, ar->normal_mode_fw.fw_file.firmware_data,
+ ar->normal_mode_fw.fw_file.firmware_len));
len += scnprintf(buf + len, buf_len - len,
"otp\t\t\t%08x\n",
- crc32_le(0, ar->otp_data, ar->otp_len));
+ crc32_le(0, ar->normal_mode_fw.fw_file.otp_data,
+ ar->normal_mode_fw.fw_file.otp_len));
len += scnprintf(buf + len, buf_len - len,
"codeswap\t\t%08x\n",
- crc32_le(0, ar->swap.firmware_codeswap_data,
- ar->swap.firmware_codeswap_len));
+ crc32_le(0, ar->normal_mode_fw.fw_file.codeswap_data,
+ ar->normal_mode_fw.fw_file.codeswap_len));
len += scnprintf(buf + len, buf_len - len,
"board-N.bin\t\t%08x\n",
- crc32_le(0, ar->board->data, ar->board->size));
+ crc32_le(0, ar->normal_mode_fw.board->data,
+ ar->normal_mode_fw.board->size));
len += scnprintf(buf + len, buf_len - len,
"board\t\t\t%08x\n",
- crc32_le(0, ar->board_data, ar->board_len));
+ crc32_le(0, ar->normal_mode_fw.board_data,
+ ar->normal_mode_fw.board_len));
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
@@ -2342,6 +2424,11 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("btcoex", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_btcoex);
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ debugfs_create_file("peer_stats", S_IRUGO | S_IWUSR,
+ ar->debug.debugfs_phy, ar,
+ &fops_peer_stats);
+
debugfs_create_file("fw_checksums", S_IRUSR,
ar->debug.debugfs_phy, ar, &fops_fw_checksums);
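Editor's note: the btcoex and new peer_stats debugfs write handlers above follow the same pattern: copy the user buffer, NUL-terminate it, parse it with strtobool(), and only schedule a firmware restart when the flag actually changes. A userspace analogue of the strtobool() step, assuming the first-character semantics it had at the time (y/Y/1 true, n/N/0 false); the function name here is illustrative:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int parse_bool(const char *s, bool *res)
{
	switch (s[0]) {
	case 'y': case 'Y': case '1':
		*res = true;
		return 0;
	case 'n': case 'N': case '0':
		*res = false;
		return 0;
	default:
		return -EINVAL;
	}
}

int main(void)
{
	bool val;

	/* "echo 1 > peer_stats" delivers a trailing newline, which the
	 * first-character parse tolerates.
	 */
	if (parse_bool("1\n", &val) == 0)
		printf("peer_stats -> %s\n", val ? "on" : "off");
	return 0;
}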
diff --git a/drivers/net/wireless/ath/ath10k/debug.h b/drivers/net/wireless/ath/ath10k/debug.h
index 6206edd7c..75c89e362 100644
--- a/drivers/net/wireless/ath/ath10k/debug.h
+++ b/drivers/net/wireless/ath/ath10k/debug.h
@@ -57,7 +57,7 @@ enum ath10k_dbg_aggr_mode {
};
/* FIXME: How to calculate the buffer size sanely? */
-#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
+#define ATH10K_FW_STATS_BUF_SIZE (1024 * 1024)
extern unsigned int ath10k_debug_mask;
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index e70aa38e6..cc827185d 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -297,10 +297,10 @@ struct ath10k_htc_svc_conn_resp {
#define ATH10K_NUM_CONTROL_TX_BUFFERS 2
#define ATH10K_HTC_MAX_LEN 4096
#define ATH10K_HTC_MAX_CTRL_MSG_LEN 256
-#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_WAIT_TIMEOUT_HZ (1 * HZ)
#define ATH10K_HTC_CONTROL_BUFFER_SIZE (ATH10K_HTC_MAX_CTRL_MSG_LEN + \
sizeof(struct ath10k_htc_hdr))
-#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1*HZ)
+#define ATH10K_HTC_CONN_SVC_TIMEOUT_HZ (1 * HZ)
struct ath10k_htc_ep {
struct ath10k_htc *htc;
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 7561f22f1..130cd9502 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -149,7 +149,7 @@ int ath10k_htt_connect(struct ath10k_htt *htt)
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath10k_htt_htc_tx_complete;
- conn_req.ep_ops.ep_rx_complete = ath10k_htt_t2h_msg_handler;
+ conn_req.ep_ops.ep_rx_complete = ath10k_htt_htc_t2h_msg_handler;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
@@ -183,7 +183,7 @@ int ath10k_htt_init(struct ath10k *ar)
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
- switch (ar->htt.op_version) {
+ switch (ar->running_fw->fw_file.htt_op_version) {
case ATH10K_FW_HTT_OP_VERSION_10_4:
ar->htt.t2h_msg_types = htt_10_4_t2h_msg_types;
ar->htt.t2h_msg_types_max = HTT_10_4_T2H_NUM_MSGS;
@@ -208,7 +208,7 @@ int ath10k_htt_init(struct ath10k *ar)
return 0;
}
-#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
+#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
static int ath10k_htt_verify_version(struct ath10k_htt *htt)
{
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index 13391ea44..911c535d0 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/hashtable.h>
+#include <linux/kfifo.h>
#include <net/mac80211.h>
#include "htc.h"
@@ -1461,15 +1462,23 @@ struct htt_tx_mode_switch_ind {
struct htt_tx_mode_switch_record records[0];
} __packed;
+struct htt_channel_change {
+ u8 pad[3];
+ __le32 freq;
+ __le32 center_freq1;
+ __le32 center_freq2;
+ __le32 phymode;
+} __packed;
+
union htt_rx_pn_t {
/* WEP: 24-bit PN */
u32 pn24;
/* TKIP or CCMP: 48-bit PN */
- u_int64_t pn48;
+ u64 pn48;
/* WAPI: 128-bit PN */
- u_int64_t pn128[2];
+ u64 pn128[2];
};
struct htt_cmd {
@@ -1511,16 +1520,22 @@ struct htt_resp {
struct htt_tx_fetch_ind tx_fetch_ind;
struct htt_tx_fetch_confirm tx_fetch_confirm;
struct htt_tx_mode_switch_ind tx_mode_switch_ind;
+ struct htt_channel_change chan_change;
};
} __packed;
/*** host side structures follow ***/
struct htt_tx_done {
- u32 msdu_id;
- bool discard;
- bool no_ack;
- bool success;
+ u16 msdu_id;
+ u16 status;
+};
+
+enum htt_tx_compl_state {
+ HTT_TX_COMPL_STATE_NONE,
+ HTT_TX_COMPL_STATE_ACK,
+ HTT_TX_COMPL_STATE_NOACK,
+ HTT_TX_COMPL_STATE_DISCARD,
};
struct htt_peer_map_event {
@@ -1547,7 +1562,6 @@ struct ath10k_htt {
u8 target_version_major;
u8 target_version_minor;
struct completion target_version_received;
- enum ath10k_fw_htt_op_version op_version;
u8 max_num_amsdu;
u8 max_num_ampdu;
@@ -1641,17 +1655,20 @@ struct ath10k_htt {
struct idr pending_tx;
wait_queue_head_t empty_tx_wq;
+ /* FIFO for storing tx done status {ack, no-ack, discard} and msdu id */
+ DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done);
+
/* set if host-fw communication goes haywire
* used to avoid further failures */
bool rx_confused;
- struct tasklet_struct rx_replenish_task;
+ atomic_t num_mpdus_ready;
/* This is used to group tx/rx completions separately and process them
* in batches to reduce cache stalls */
struct tasklet_struct txrx_compl_task;
- struct sk_buff_head tx_compl_q;
struct sk_buff_head rx_compl_q;
struct sk_buff_head rx_in_ord_compl_q;
+ struct sk_buff_head tx_fetch_ind_q;
/* rx_status template */
struct ieee80211_rx_status rx_status;
@@ -1667,10 +1684,13 @@ struct ath10k_htt {
} txbuf;
struct {
+ bool enabled;
struct htt_q_state *vaddr;
dma_addr_t paddr;
+ u16 num_push_allowed;
u16 num_peers;
u16 num_tids;
+ enum htt_tx_mode_switch_mode mode;
enum htt_q_depth_type type;
} tx_q_state;
};
@@ -1715,7 +1735,7 @@ struct htt_rx_desc {
/* Refill a bunch of RX buffers for each refill round so that FW/HW can handle
* aggregated traffic more nicely. */
-#define ATH10K_HTT_MAX_NUM_REFILL 16
+#define ATH10K_HTT_MAX_NUM_REFILL 100
/*
* DMA_MAP expects the buffer to be an integral number of cache lines.
@@ -1743,7 +1763,8 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar);
void ath10k_htt_rx_free(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb);
int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt);
int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie);
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt);
@@ -1752,8 +1773,23 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_ampdu,
u8 max_subfrms_amsdu);
void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb);
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+ __le32 token,
+ __le16 fetch_seq_num,
+ struct htt_tx_fetch_record *records,
+ size_t num_records);
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+void ath10k_htt_tx_txq_sync(struct ath10k *ar);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+ bool is_presp);
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc);
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
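Editor's note: the htt.h hunks above shrink struct htt_tx_done to a 16-bit msdu_id plus status word and queue completions through DECLARE_KFIFO_PTR(txdone_fifo, struct htt_tx_done) instead of an sk_buff list. A minimal single-producer/single-consumer FIFO in userspace C approximating those kfifo semantics (size and names are illustrative; the real kfifo additionally handles locking variants and dynamic allocation):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct htt_tx_done {
	uint16_t msdu_id;
	uint16_t status;
};

#define FIFO_SIZE 8 /* power of two, as kfifo requires */

static struct htt_tx_done fifo[FIFO_SIZE];
static unsigned int in, out;

static bool fifo_put(struct htt_tx_done d)
{
	if (in - out == FIFO_SIZE)
		return false; /* full; producer must handle overflow */
	fifo[in++ & (FIFO_SIZE - 1)] = d;
	return true;
}

static bool fifo_get(struct htt_tx_done *d)
{
	if (in == out)
		return false; /* empty */
	*d = fifo[out++ & (FIFO_SIZE - 1)];
	return true;
}

int main(void)
{
	struct htt_tx_done d = { .msdu_id = 42, .status = 1 };
	struct htt_tx_done got;

	/* Producer side (rx completion path) enqueues... */
	fifo_put(d);
	/* ...consumer side (txrx tasklet) drains in batches. */
	while (fifo_get(&got))
		printf("msdu %u status %u\n", got.msdu_id, got.status);
	return 0;
}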
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index ae9b686a4..813cdd262 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -31,6 +31,8 @@
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
+#define HTT_RX_RING_REFILL_RESCHED_MS 5
+
static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb);
static void ath10k_htt_txrx_compl_task(unsigned long ptr);
@@ -192,7 +194,8 @@ static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
} else if (num_deficit > 0) {
- tasklet_schedule(&htt->rx_replenish_task);
+ mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
+ msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
}
spin_unlock_bh(&htt->rx_ring.lock);
}
@@ -223,12 +226,11 @@ int ath10k_htt_rx_ring_refill(struct ath10k *ar)
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
del_timer_sync(&htt->rx_ring.refill_retry_timer);
- tasklet_kill(&htt->rx_replenish_task);
tasklet_kill(&htt->txrx_compl_task);
- skb_queue_purge(&htt->tx_compl_q);
skb_queue_purge(&htt->rx_compl_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
+ skb_queue_purge(&htt->tx_fetch_ind_q);
ath10k_htt_rx_ring_free(htt);
@@ -281,7 +283,6 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
/* return: < 0 fatal error, 0 - non chained msdu, 1 chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
- u8 **fw_desc, int *fw_desc_len,
struct sk_buff_head *amsdu)
{
struct ath10k *ar = htt->ar;
@@ -323,48 +324,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return -EIO;
}
- /*
- * Copy the FW rx descriptor for this MSDU from the rx
- * indication message into the MSDU's netbuf. HL uses the
- * same rx indication message definition as LL, and simply
- * appends new info (fields from the HW rx desc, and the
- * MSDU payload itself). So, the offset into the rx
- * indication message only has to account for the standard
- * offset of the per-MSDU FW rx desc info within the
- * message, and how many bytes of the per-MSDU FW rx desc
- * info have already been consumed. (And the endianness of
- * the host, since for a big-endian host, the rx ind
- * message contents, including the per-MSDU rx desc bytes,
- * were byteswapped during upload.)
- */
- if (*fw_desc_len > 0) {
- rx_desc->fw_desc.info0 = **fw_desc;
- /*
- * The target is expected to only provide the basic
- * per-MSDU rx descriptors. Just to be sure, verify
- * that the target has not attached extension data
- * (e.g. LRO flow ID).
- */
-
- /* or more, if there's extension data */
- (*fw_desc)++;
- (*fw_desc_len)--;
- } else {
- /*
- * When an oversized AMSDU happened, FW will lost
- * some of MSDU status - in this case, the FW
- * descriptors provided will be less than the
- * actual MSDUs inside this MPDU. Mark the FW
- * descriptors so that it will still deliver to
- * upper stack, if no CRC error for this MPDU.
- *
- * FIX THIS - the FW descriptors are actually for
- * MSDUs in the end of this A-MSDU instead of the
- * beginning.
- */
- rx_desc->fw_desc.info0 = 0;
- }
-
msdu_len_invalid = !!(__le32_to_cpu(rx_desc->attention.flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
@@ -423,13 +382,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
return msdu_chaining;
}
-static void ath10k_htt_rx_replenish_task(unsigned long ptr)
-{
- struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
-
- ath10k_htt_rx_msdu_buff_replenish(htt);
-}
-
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
u32 paddr)
{
@@ -563,12 +515,10 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = 0;
hash_init(htt->rx_ring.skb_table);
- tasklet_init(&htt->rx_replenish_task, ath10k_htt_rx_replenish_task,
- (unsigned long)htt);
-
- skb_queue_head_init(&htt->tx_compl_q);
skb_queue_head_init(&htt->rx_compl_q);
skb_queue_head_init(&htt->rx_in_ord_compl_q);
+ skb_queue_head_init(&htt->tx_fetch_ind_q);
+ atomic_set(&htt->num_mpdus_ready, 0);
tasklet_init(&htt->txrx_compl_task, ath10k_htt_txrx_compl_task,
(unsigned long)htt);
@@ -860,6 +810,8 @@ static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
if (!ch)
ch = ath10k_htt_rx_h_any_channel(ar);
+ if (!ch)
+ ch = ar->tgt_oper_chan;
spin_unlock_bh(&ar->data_lock);
if (!ch)
@@ -979,7 +931,7 @@ static void ath10k_process_rx(struct ath10k *ar,
*status = *rx_status;
ath10k_dbg(ar, ATH10K_DBG_DATA,
- "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %s %s sn %u %s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%llx fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
@@ -1014,7 +966,7 @@ static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
int len = ieee80211_hdrlen(hdr->frame_control);
if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
- ar->fw_features))
+ ar->running_fw->fw_file.fw_features))
len = round_up(len, 4);
return len;
@@ -1076,20 +1028,25 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
hdr = (void *)msdu->data;
/* Tail */
- skb_trim(msdu, msdu->len - ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ if (status->flag & RX_FLAG_IV_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_tail_len(ar, enctype));
/* MMIC */
- if (!ieee80211_has_morefrags(hdr->frame_control) &&
+ if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
+ !ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
skb_trim(msdu, msdu->len - 8);
/* Head */
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+ crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
- memmove((void *)msdu->data + crypto_len,
- (void *)msdu->data, hdr_len);
- skb_pull(msdu, crypto_len);
+ memmove((void *)msdu->data + crypto_len,
+ (void *)msdu->data, hdr_len);
+ skb_pull(msdu, crypto_len);
+ }
}
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
@@ -1343,6 +1300,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
bool has_tkip_err;
bool has_peer_idx_invalid;
bool is_decrypted;
+ bool is_mgmt;
u32 attention;
if (skb_queue_empty(amsdu))
@@ -1351,6 +1309,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
first = skb_peek(amsdu);
rxd = (void *)first->data - sizeof(*rxd);
+ is_mgmt = !!(rxd->attention.flags &
+ __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
+
enctype = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
@@ -1392,6 +1353,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
+ RX_FLAG_ONLY_MONITOR |
RX_FLAG_MMIC_STRIPPED);
if (has_fcs_err)
@@ -1400,10 +1362,21 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
if (has_tkip_err)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (is_decrypted)
- status->flag |= RX_FLAG_DECRYPTED |
- RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
+ /* Firmware reports all necessary management frames via WMI already.
+ * They are not reported to monitor interfaces at all so pass the ones
+ * coming via HTT to monitor interfaces instead. This simplifies
+ * matters a lot.
+ */
+ if (is_mgmt)
+ status->flag |= RX_FLAG_ONLY_MONITOR;
+
+ if (is_decrypted) {
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ if (likely(!is_mgmt))
+ status->flag |= RX_FLAG_IV_STRIPPED |
+ RX_FLAG_MMIC_STRIPPED;
+	}
skb_queue_walk(amsdu, msdu) {
ath10k_htt_rx_h_csum_offload(msdu);
@@ -1416,6 +1389,8 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
*/
if (!is_decrypted)
continue;
+ if (is_mgmt)
+ continue;
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
@@ -1516,14 +1491,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status)
{
- struct sk_buff *msdu;
- struct htt_rx_desc *rxd;
- bool is_mgmt;
- bool has_fcs_err;
-
- msdu = skb_peek(amsdu);
- rxd = (void *)msdu->data - sizeof(*rxd);
-
/* FIXME: It might be a good idea to do some fuzzy-testing to drop
* invalid/dangerous frames.
*/
@@ -1533,23 +1500,6 @@ static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
return false;
}
- is_mgmt = !!(rxd->attention.flags &
- __cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
- has_fcs_err = !!(rxd->attention.flags &
- __cpu_to_le32(RX_ATTENTION_FLAGS_FCS_ERR));
-
- /* Management frames are handled via WMI events. The pros of such
- * approach is that channel is explicitly provided in WMI events
- * whereas HTT doesn't provide channel information for Rxed frames.
- *
- * However some firmware revisions don't report corrupted frames via
- * WMI so don't drop them.
- */
- if (is_mgmt && !has_fcs_err) {
- ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx mgmt ctrl\n");
- return false;
- }
-
if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
return false;
@@ -1571,25 +1521,49 @@ static void ath10k_htt_rx_h_filter(struct ath10k *ar,
__skb_queue_purge(amsdu);
}
-static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
- struct htt_rx_indication *rx)
+static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
- struct ieee80211_rx_status *rx_status = &htt->rx_status;
- struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ static struct ieee80211_rx_status rx_status;
struct sk_buff_head amsdu;
- int num_mpdu_ranges;
- int fw_desc_len;
- u8 *fw_desc;
- int i, ret, mpdu_count = 0;
+ int ret;
- lockdep_assert_held(&htt->rx_ring.lock);
+ __skb_queue_head_init(&amsdu);
- if (htt->rx_confused)
- return;
+ spin_lock_bh(&htt->rx_ring.lock);
+ if (htt->rx_confused) {
+ spin_unlock_bh(&htt->rx_ring.lock);
+ return -EIO;
+ }
+ ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
+ spin_unlock_bh(&htt->rx_ring.lock);
- fw_desc_len = __le16_to_cpu(rx->prefix.fw_rx_desc_bytes);
- fw_desc = (u8 *)&rx->fw_desc;
+ if (ret < 0) {
+ ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
+ __skb_queue_purge(&amsdu);
+ /* FIXME: It's probably a good idea to reboot the
+ * device instead of leaving it inoperable.
+ */
+ htt->rx_confused = true;
+ return ret;
+ }
+
+ ath10k_htt_rx_h_ppdu(ar, &amsdu, &rx_status, 0xffff);
+ ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
+ ath10k_htt_rx_h_filter(ar, &amsdu, &rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, &rx_status);
+ ath10k_htt_rx_h_deliver(ar, &amsdu, &rx_status);
+
+ return 0;
+}
+
+static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
+ struct htt_rx_indication *rx)
+{
+ struct ath10k *ar = htt->ar;
+ struct htt_rx_indication_mpdu_range *mpdu_ranges;
+ int num_mpdu_ranges;
+ int i, mpdu_count = 0;
num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
@@ -1603,80 +1577,19 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
for (i = 0; i < num_mpdu_ranges; i++)
mpdu_count += mpdu_ranges[i].mpdu_count;
- while (mpdu_count--) {
- __skb_queue_head_init(&amsdu);
- ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc,
- &fw_desc_len, &amsdu);
- if (ret < 0) {
- ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
- __skb_queue_purge(&amsdu);
- /* FIXME: It's probably a good idea to reboot the
- * device instead of leaving it inoperable.
- */
- htt->rx_confused = true;
- break;
- }
+ atomic_add(mpdu_count, &htt->num_mpdus_ready);
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
- ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
- ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
- }
-
- tasklet_schedule(&htt->rx_replenish_task);
+ tasklet_schedule(&htt->txrx_compl_task);
}
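
With this change the RX indication handler no longer pops AMSDUs inline: it only accounts for the announced MPDUs in an atomic counter and schedules the txrx tasklet, which later drains one AMSDU per counter tick and stops early if the ring turns out to be corrupted. A minimal userspace sketch of that defer-and-drain split, assuming C11 atomics; all names below are illustrative, not the driver's API:

/* "Top half" only accounts for work; "bottom half" drains it in a
 * bounded loop and stops on the first failure.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int num_mpdus_ready;

static void rx_ind(int mpdu_count)        /* cheap interrupt-side step */
{
	atomic_fetch_add(&num_mpdus_ready, mpdu_count);
	/* real driver: tasklet_schedule(&htt->txrx_compl_task); */
}

static int handle_one_amsdu(void)         /* nonzero = ring corrupted */
{
	return 0;
}

static void txrx_task(void)               /* deferred processing */
{
	int n = atomic_load(&num_mpdus_ready);

	while (n--) {
		if (handle_one_amsdu())
			break;               /* leave the count intact */
		atomic_fetch_sub(&num_mpdus_ready, 1);
	}
}

int main(void)
{
	rx_ind(3);
	txrx_task();
	printf("left: %d\n", atomic_load(&num_mpdus_ready));
	return 0;
}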
-static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
- struct htt_rx_fragment_indication *frag)
+static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt)
{
- struct ath10k *ar = htt->ar;
- struct ieee80211_rx_status *rx_status = &htt->rx_status;
- struct sk_buff_head amsdu;
- int ret;
- u8 *fw_desc;
- int fw_desc_len;
-
- fw_desc_len = __le16_to_cpu(frag->fw_rx_desc_bytes);
- fw_desc = (u8 *)frag->fw_msdu_rx_desc;
-
- __skb_queue_head_init(&amsdu);
+ atomic_inc(&htt->num_mpdus_ready);
- spin_lock_bh(&htt->rx_ring.lock);
- ret = ath10k_htt_rx_amsdu_pop(htt, &fw_desc, &fw_desc_len,
- &amsdu);
- spin_unlock_bh(&htt->rx_ring.lock);
-
- tasklet_schedule(&htt->rx_replenish_task);
-
- ath10k_dbg(ar, ATH10K_DBG_HTT_DUMP, "htt rx frag ahead\n");
-
- if (ret) {
- ath10k_warn(ar, "failed to pop amsdu from httr rx ring for fragmented rx %d\n",
- ret);
- __skb_queue_purge(&amsdu);
- return;
- }
-
- if (skb_queue_len(&amsdu) != 1) {
- ath10k_warn(ar, "failed to pop frag amsdu: too many msdus\n");
- __skb_queue_purge(&amsdu);
- return;
- }
-
- ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
- ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
-
- if (fw_desc_len > 0) {
- ath10k_dbg(ar, ATH10K_DBG_HTT,
- "expecting more fragmented rx in one indication %d\n",
- fw_desc_len);
- }
+ tasklet_schedule(&htt->txrx_compl_task);
}
-static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
+static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
@@ -1688,19 +1601,19 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
switch (status) {
case HTT_DATA_TX_STATUS_NO_ACK:
- tx_done.no_ack = true;
+ tx_done.status = HTT_TX_COMPL_STATE_NOACK;
break;
case HTT_DATA_TX_STATUS_OK:
- tx_done.success = true;
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
break;
case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE:
case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
- tx_done.discard = true;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
default:
ath10k_warn(ar, "unhandled tx completion status %d\n", status);
- tx_done.discard = true;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
}
@@ -1710,7 +1623,20 @@ static void ath10k_htt_rx_frm_tx_compl(struct ath10k *ar,
for (i = 0; i < resp->data_tx_completion.num_msdus; i++) {
msdu_id = resp->data_tx_completion.msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
- ath10k_txrx_tx_unref(htt, &tx_done);
+
+ /* kfifo_put: In practice firmware shouldn't fire off per-CE
+ * interrupt and main interrupt (MSI/-X range case) for the same
+ * HTC service so it should be safe to use kfifo_put w/o lock.
+ *
+ * From kfifo_put() documentation:
+ * Note that with only one concurrent reader and one concurrent
+ * writer, you don't need extra locking to use these macro.
+ */
+ if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
+ ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
+ tx_done.msdu_id, tx_done.status);
+ ath10k_txrx_tx_unref(htt, &tx_done);
+ }
}
}
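
The kfifo comment above is the crux: the completion handler is the only writer and the txrx tasklet the only reader, so a power-of-two ring can be used without a lock, and an overrun falls back to completing the frame inline. A rough userspace analogue of that single-producer/single-consumer contract (this is not the kernel kfifo implementation, only the ordering it relies on):

#include <stdatomic.h>
#include <stdbool.h>

#define RING_SZ 8 /* must be a power of two */

struct spsc_ring {
	unsigned int slots[RING_SZ];
	atomic_uint head; /* advanced only by the producer */
	atomic_uint tail; /* advanced only by the consumer */
};

static bool ring_put(struct spsc_ring *r, unsigned int v) /* producer */
{
	unsigned int h = atomic_load_explicit(&r->head, memory_order_relaxed);
	unsigned int t = atomic_load_explicit(&r->tail, memory_order_acquire);

	if (h - t == RING_SZ)
		return false; /* full: caller completes inline, as above */
	r->slots[h & (RING_SZ - 1)] = v;
	atomic_store_explicit(&r->head, h + 1, memory_order_release);
	return true;
}

static bool ring_get(struct spsc_ring *r, unsigned int *v) /* consumer */
{
	unsigned int t = atomic_load_explicit(&r->tail, memory_order_relaxed);
	unsigned int h = atomic_load_explicit(&r->head, memory_order_acquire);

	if (t == h)
		return false; /* empty */
	*v = r->slots[t & (RING_SZ - 1)];
	atomic_store_explicit(&r->tail, t + 1, memory_order_release);
	return true;
}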
@@ -1978,11 +1904,323 @@ static void ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
return;
}
}
-
-	tasklet_schedule(&htt->rx_replenish_task);
 }
+
+static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
+						   const __le32 *resp_ids,
+						   int num_resp_ids)
+{
+	int i;
+	u32 resp_id;
+
+	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
+		   num_resp_ids);
+
+	for (i = 0; i < num_resp_ids; i++) {
+		resp_id = le32_to_cpu(resp_ids[i]);
+
+		ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
+			   resp_id);
+
+		/* TODO: free resp_id */
+	}
+}
-void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_txq *txq;
+ struct htt_resp *resp = (struct htt_resp *)skb->data;
+ struct htt_tx_fetch_record *record;
+ size_t len;
+ size_t max_num_bytes;
+ size_t max_num_msdus;
+ size_t num_bytes;
+ size_t num_msdus;
+ const __le32 *resp_ids;
+ u16 num_records;
+ u16 num_resp_ids;
+ u16 peer_id;
+ u8 tid;
+ int ret;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
+ return;
+ }
+
+ num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
+ num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
+
+ len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
+ len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
+ return;
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %hu num resps %hu seq %hu\n",
+ num_records, num_resp_ids,
+ le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
+
+ if (!ar->htt.tx_q_state.enabled) {
+ ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
+ return;
+ }
+
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
+ ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
+ return;
+ }
+
+ rcu_read_lock();
+
+ for (i = 0; i < num_records; i++) {
+ record = &resp->tx_fetch_ind.records[i];
+ peer_id = MS(le16_to_cpu(record->info),
+ HTT_TX_FETCH_RECORD_INFO_PEER_ID);
+ tid = MS(le16_to_cpu(record->info),
+ HTT_TX_FETCH_RECORD_INFO_TID);
+ max_num_msdus = le16_to_cpu(record->num_msdus);
+ max_num_bytes = le32_to_cpu(record->num_bytes);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %hu tid %hhu msdus %zu bytes %zu\n",
+ i, peer_id, tid, max_num_msdus, max_num_bytes);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It is okay to release the lock and use txq because RCU read
+ * lock is held.
+ */
+
+ if (unlikely(!txq)) {
+ ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ num_msdus = 0;
+ num_bytes = 0;
+
+ while (num_msdus < max_num_msdus &&
+ num_bytes < max_num_bytes) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+
+ num_msdus++;
+ num_bytes += ret;
+ }
+
+ record->num_msdus = cpu_to_le16(num_msdus);
+ record->num_bytes = cpu_to_le32(num_bytes);
+
+ ath10k_htt_tx_txq_recalc(hw, txq);
+ }
+
+ rcu_read_unlock();
+
+ resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
+ ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
+
+ ret = ath10k_htt_tx_fetch_resp(ar,
+ resp->tx_fetch_ind.token,
+ resp->tx_fetch_ind.fetch_seq_num,
+ resp->tx_fetch_ind.records,
+ num_records);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
+ le32_to_cpu(resp->tx_fetch_ind.token), ret);
+ /* FIXME: request fw restart */
+ }
+
+ ath10k_htt_tx_txq_sync(ar);
+}
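
Note the two-step validation at the top of the function: the fixed header is bounds-checked first, then the total length is recomputed from the record and resp_id counts that header carries and checked again before the flexible arrays are touched. A hedged standalone sketch of the pattern, with an invented header layout and an assumed 8-byte record size:

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

struct ind_hdr {
	uint16_t num_records;
	uint16_t num_resp_ids;
	/* followed by num_records records, then num_resp_ids u32 ids */
};

static bool ind_is_sane(const void *buf, size_t buf_len)
{
	const struct ind_hdr *hdr = buf;
	size_t len = sizeof(*hdr);

	if (buf_len < len)            /* step 1: fixed part present? */
		return false;

	/* step 2: recompute the full length from attacker-controlled
	 * counts, accumulating in size_t so a u16 cannot overflow it
	 */
	len += (size_t)hdr->num_records * 8;
	len += (size_t)hdr->num_resp_ids * sizeof(uint32_t);

	return buf_len >= len;        /* claimed payload actually fits */
}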
+
+static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct htt_resp *resp = (void *)skb->data;
+ size_t len;
+ int num_resp_ids;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
+ return;
+ }
+
+ num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
+ len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
+ return;
+ }
+
+ ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
+ resp->tx_fetch_confirm.resp_ids,
+ num_resp_ids);
+}
+
+static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ const struct htt_resp *resp = (void *)skb->data;
+ const struct htt_tx_mode_switch_record *record;
+ struct ieee80211_txq *txq;
+ struct ath10k_txq *artxq;
+ size_t len;
+ size_t num_records;
+ enum htt_tx_mode_switch_mode mode;
+ bool enable;
+ u16 info0;
+ u16 info1;
+ u16 threshold;
+ u16 peer_id;
+ u8 tid;
+ int i;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
+
+ len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
+ return;
+ }
+
+ info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
+ info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
+
+ enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
+ num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
+ mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
+ threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt rx tx mode switch ind info0 0x%04hx info1 0x%04hx enable %d num records %zd mode %d threshold %hu\n",
+ info0, info1, enable, num_records, mode, threshold);
+
+ len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
+
+ if (unlikely(skb->len < len)) {
+ ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
+ return;
+ }
+
+ switch (mode) {
+ case HTT_TX_MODE_SWITCH_PUSH:
+ case HTT_TX_MODE_SWITCH_PUSH_PULL:
+ break;
+ default:
+ ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
+ mode);
+ return;
+ }
+
+ if (!enable)
+ return;
+
+ ar->htt.tx_q_state.enabled = enable;
+ ar->htt.tx_q_state.mode = mode;
+ ar->htt.tx_q_state.num_push_allowed = threshold;
+
+ rcu_read_lock();
+
+ for (i = 0; i < num_records; i++) {
+ record = &resp->tx_mode_switch_ind.records[i];
+ info0 = le16_to_cpu(record->info0);
+ peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
+ tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "received out of range peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
+ spin_unlock_bh(&ar->data_lock);
+
+ /* It is okay to release the lock and use txq because RCU read
+ * lock is held.
+ */
+
+ if (unlikely(!txq)) {
+ ath10k_warn(ar, "failed to lookup txq for peer_id %hu tid %hhu\n",
+ peer_id, tid);
+ continue;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq = (void *)txq->drv_priv;
+ artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ }
+
+ rcu_read_unlock();
+
+ ath10k_mac_tx_push_pending(ar);
+}
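
Both new handlers lean on the driver's MS()/SM() helpers to crack the packed info0/info1 words into fields. A self-contained sketch of how such mask/shift macro pairs work; the DEMO_* field layout below is made up for illustration, and the macro bodies are an assumption about, not a copy of, the driver's headers:

#include <stdint.h>
#include <stdio.h>

/* extract (Mask-Shift) and insert (Shift-Mask) a field, given a pair
 * of FOO_MASK/FOO_LSB definitions
 */
#define MS(_v, _f) (((_v) & _f##_MASK) >> _f##_LSB)
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)

/* hypothetical 16-bit layout: bit 0 = enable, bits 15:1 = num_records */
#define DEMO_INFO0_ENABLE_MASK      0x0001
#define DEMO_INFO0_ENABLE_LSB       0
#define DEMO_INFO0_NUM_RECORDS_MASK 0xfffe
#define DEMO_INFO0_NUM_RECORDS_LSB  1

int main(void)
{
	uint16_t info0 = SM(1, DEMO_INFO0_ENABLE) |
			 SM(42, DEMO_INFO0_NUM_RECORDS);

	printf("enable=%u num_records=%u\n",
	       (unsigned)MS(info0, DEMO_INFO0_ENABLE),
	       (unsigned)MS(info0, DEMO_INFO0_NUM_RECORDS));
	return 0; /* prints: enable=1 num_records=42 */
}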
+
+static inline enum nl80211_band phy_mode_to_band(u32 phy_mode)
+{
+ enum nl80211_band band;
+
+ switch (phy_mode) {
+ case MODE_11A:
+ case MODE_11NA_HT20:
+ case MODE_11NA_HT40:
+ case MODE_11AC_VHT20:
+ case MODE_11AC_VHT40:
+ case MODE_11AC_VHT80:
+ band = NL80211_BAND_5GHZ;
+ break;
+ case MODE_11G:
+ case MODE_11B:
+ case MODE_11GONLY:
+ case MODE_11NG_HT20:
+ case MODE_11NG_HT40:
+ case MODE_11AC_VHT20_2G:
+ case MODE_11AC_VHT40_2G:
+ case MODE_11AC_VHT80_2G:
+ default:
+ band = NL80211_BAND_2GHZ;
+ }
+
+ return band;
+}
+
+void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
+{
+ bool release;
+
+ release = ath10k_htt_t2h_msg_handler(ar, skb);
+
+ /* Free the indication buffer */
+ if (release)
+ dev_kfree_skb_any(skb);
+}
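
The split above changes skb ownership: ath10k_htt_t2h_msg_handler() now returns true when the caller still owns the buffer and must free it, and false when the message was queued for deferred processing. A generic sketch of that return-value ownership convention, with plain malloc/free standing in for skbs:

#include <stdbool.h>
#include <stdlib.h>

struct buf { int type; };

static bool handle_msg(struct buf *b)
{
	switch (b->type) {
	case 1:            /* deferred: ownership moved to a queue */
		/* queue_tail(&pending_q, b); */
		return false;
	default:           /* handled inline: caller releases */
		return true;
	}
}

static void dispatch(struct buf *b)
{
	if (handle_msg(b))
		free(b);   /* mirrors dev_kfree_skb_any() above */
}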
+
+bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
@@ -1998,8 +2236,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
- dev_kfree_skb_any(skb);
- return;
+ return true;
}
type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
@@ -2011,9 +2248,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
- skb_queue_tail(&htt->rx_compl_q, skb);
- tasklet_schedule(&htt->txrx_compl_task);
- return;
+ ath10k_htt_rx_proc_rx_ind(htt, &resp->rx_ind);
+ break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
.vdev_id = resp->peer_map.vdev_id,
@@ -2034,28 +2270,33 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
struct htt_tx_done tx_done = {};
int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
- tx_done.msdu_id =
- __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
+ tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
switch (status) {
case HTT_MGMT_TX_STATUS_OK:
- tx_done.success = true;
+ tx_done.status = HTT_TX_COMPL_STATE_ACK;
break;
case HTT_MGMT_TX_STATUS_RETRY:
- tx_done.no_ack = true;
+ tx_done.status = HTT_TX_COMPL_STATE_NOACK;
break;
case HTT_MGMT_TX_STATUS_DROP:
- tx_done.discard = true;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
}
- ath10k_txrx_tx_unref(htt, &tx_done);
+ status = ath10k_txrx_tx_unref(htt, &tx_done);
+ if (!status) {
+ spin_lock_bh(&htt->tx_lock);
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&htt->tx_lock);
+ }
+ ath10k_mac_tx_push_pending(ar);
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
- skb_queue_tail(&htt->tx_compl_q, skb);
+ ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
tasklet_schedule(&htt->txrx_compl_task);
- return;
+ break;
case HTT_T2H_MSG_TYPE_SEC_IND: {
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
@@ -2071,7 +2312,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
- ath10k_htt_rx_frag_handler(htt, &resp->rx_frag_ind);
+ ath10k_htt_rx_frag_handler(htt);
break;
}
case HTT_T2H_MSG_TYPE_TEST:
@@ -2111,18 +2352,39 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
tasklet_schedule(&htt->txrx_compl_task);
- return;
+ return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND:
break;
- case HTT_T2H_MSG_TYPE_CHAN_CHANGE:
+ case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
+ u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
+ u32 freq = __le32_to_cpu(resp->chan_change.freq);
+
+ ar->tgt_oper_chan =
+ __ieee80211_get_channel(ar->hw->wiphy, freq);
+ ath10k_dbg(ar, ATH10K_DBG_HTT,
+ "htt chan change freq %u phymode %s\n",
+ freq, ath10k_wmi_phymode_str(phymode));
break;
+ }
case HTT_T2H_MSG_TYPE_AGGR_CONF:
break;
- case HTT_T2H_MSG_TYPE_TX_FETCH_IND:
+ case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
+ struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
+
+ if (!tx_fetch_ind) {
+ ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
+ break;
+ }
+ skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
+ tasklet_schedule(&htt->txrx_compl_task);
+ break;
+ }
case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
+ ath10k_htt_rx_tx_fetch_confirm(ar, skb);
+ break;
case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
- /* TODO: Implement pull-push logic */
+ ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
break;
case HTT_T2H_MSG_TYPE_EN_STATS:
default:
@@ -2132,9 +2394,7 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
skb->data, skb->len);
break;
};
-
- /* Free the indication buffer */
- dev_kfree_skb_any(skb);
+ return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
@@ -2150,40 +2410,47 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
{
struct ath10k_htt *htt = (struct ath10k_htt *)ptr;
struct ath10k *ar = htt->ar;
- struct sk_buff_head tx_q;
- struct sk_buff_head rx_q;
+ struct htt_tx_done tx_done = {};
struct sk_buff_head rx_ind_q;
- struct htt_resp *resp;
+ struct sk_buff_head tx_ind_q;
struct sk_buff *skb;
unsigned long flags;
+ int num_mpdus;
- __skb_queue_head_init(&tx_q);
- __skb_queue_head_init(&rx_q);
__skb_queue_head_init(&rx_ind_q);
-
- spin_lock_irqsave(&htt->tx_compl_q.lock, flags);
- skb_queue_splice_init(&htt->tx_compl_q, &tx_q);
- spin_unlock_irqrestore(&htt->tx_compl_q.lock, flags);
-
- spin_lock_irqsave(&htt->rx_compl_q.lock, flags);
- skb_queue_splice_init(&htt->rx_compl_q, &rx_q);
- spin_unlock_irqrestore(&htt->rx_compl_q.lock, flags);
+ __skb_queue_head_init(&tx_ind_q);
spin_lock_irqsave(&htt->rx_in_ord_compl_q.lock, flags);
skb_queue_splice_init(&htt->rx_in_ord_compl_q, &rx_ind_q);
spin_unlock_irqrestore(&htt->rx_in_ord_compl_q.lock, flags);
- while ((skb = __skb_dequeue(&tx_q))) {
- ath10k_htt_rx_frm_tx_compl(htt->ar, skb);
+ spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
+ skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
+ spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
+
+ /* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
+ * From kfifo_get() documentation:
+ * Note that with only one concurrent reader and one concurrent writer,
+	 * you don't need extra locking to use these macros.
+ */
+ while (kfifo_get(&htt->txdone_fifo, &tx_done))
+ ath10k_txrx_tx_unref(htt, &tx_done);
+
+ while ((skb = __skb_dequeue(&tx_ind_q))) {
+ ath10k_htt_rx_tx_fetch_ind(ar, skb);
dev_kfree_skb_any(skb);
}
- while ((skb = __skb_dequeue(&rx_q))) {
- resp = (struct htt_resp *)skb->data;
- spin_lock_bh(&htt->rx_ring.lock);
- ath10k_htt_rx_handler(htt, &resp->rx_ind);
- spin_unlock_bh(&htt->rx_ring.lock);
- dev_kfree_skb_any(skb);
+ ath10k_mac_tx_push_pending(ar);
+
+ num_mpdus = atomic_read(&htt->num_mpdus_ready);
+
+ while (num_mpdus) {
+ if (ath10k_htt_rx_handle_amsdu(htt))
+ break;
+
+ num_mpdus--;
+ atomic_dec(&htt->num_mpdus_ready);
}
while ((skb = __skb_dequeue(&rx_ind_q))) {
@@ -2192,4 +2459,6 @@ static void ath10k_htt_txrx_compl_task(unsigned long ptr)
spin_unlock_bh(&htt->rx_ring.lock);
dev_kfree_skb_any(skb);
}
+
+ ath10k_htt_rx_msdu_buff_replenish(htt);
}
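
The reworked tasklet uses the splice idiom throughout: each producer queue is emptied into a private list while its lock is held, and the actual processing happens afterwards with no lock taken. A userspace analogue with a pthread mutex standing in for the irqsave spinlock; list order is ignored for brevity:

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

struct q {
	pthread_mutex_t lock;
	struct node *head;
};

static struct node *q_splice(struct q *q)
{
	struct node *all;

	pthread_mutex_lock(&q->lock);
	all = q->head;
	q->head = NULL;          /* queue is empty again for producers */
	pthread_mutex_unlock(&q->lock);
	return all;
}

static void drain(struct q *q)
{
	struct node *n = q_splice(q);

	while (n) {              /* no lock held while processing */
		struct node *next = n->next;
		/* process(n); free(n); */
		n = next;
	}
}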
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index 95acb727c..6269c610b 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -22,53 +22,183 @@
#include "txrx.h"
#include "debug.h"
-void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt, bool limit_mgmt_desc)
+static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
- if (limit_mgmt_desc)
- htt->num_pending_mgmt_tx--;
+ int exp;
+ int factor;
+
+ exp = 0;
+ factor = count >> 7;
+
+ while (factor >= 64 && exp < 4) {
+ factor >>= 3;
+ exp++;
+ }
+
+ if (exp == 4)
+ return 0xff;
+
+ if (count > 0)
+ factor = max(1, factor);
+
+ return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
+ SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
+}
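
The helper packs a byte count into a small exponent/mantissa code; decoding is approximately factor << (7 + 3 * exp). A worked example under that assumption:

/* byte_cnt = 200000
 *   factor = 200000 >> 7 = 1562
 *   1562 >= 64 -> factor = 195, exp = 1
 *   195  >= 64 -> factor = 24,  exp = 2   (loop stops: 24 < 64)
 *   encoded: exp = 2, factor = 24 -> decodes to 24 << 13 = 196608
 */
#include <stdio.h>

static unsigned long demo_decode(unsigned int exp, unsigned int factor)
{
	return (unsigned long)factor << (7 + 3 * exp);
}

int main(void)
{
	printf("%lu\n", demo_decode(2, 24)); /* 196608, close to 200000 */
	return 0;
}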
+
+static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_sta *arsta = (void *)txq->sta->drv_priv;
+ struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
+ unsigned long frame_cnt;
+ unsigned long byte_cnt;
+ int idx;
+ u32 bit;
+ u16 peer_id;
+ u8 tid;
+ u8 count;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ if (!ar->htt.tx_q_state.enabled)
+ return;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+ return;
+
+ if (txq->sta)
+ peer_id = arsta->peer_id;
+ else
+ peer_id = arvif->peer_id;
+
+ tid = txq->tid;
+ bit = BIT(peer_id % 32);
+ idx = peer_id / 32;
+
+ ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+ count = ath10k_htt_tx_txq_calc_size(byte_cnt);
+
+ if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
+ unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
+ ath10k_warn(ar, "refusing to update txq for peer_id %hu tid %hhu due to out of bounds\n",
+ peer_id, tid);
+ return;
+ }
+
+ ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
+ ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
+ ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %hu tid %hhu count %hhu\n",
+ peer_id, tid, count);
+}
+
+static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+ u32 seq;
+ size_t size;
+
+ lockdep_assert_held(&ar->htt.tx_lock);
+
+ if (!ar->htt.tx_q_state.enabled)
+ return;
+
+ if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
+ return;
+
+ seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
+ seq++;
+ ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);
+
+ ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
+ seq);
+
+ size = sizeof(*ar->htt.tx_q_state.vaddr);
+ dma_sync_single_for_device(ar->dev,
+ ar->htt.tx_q_state.paddr,
+ size,
+ DMA_TO_DEVICE);
+}
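
The sync step publishes a whole snapshot: mutate the table under tx_lock, bump the sequence word so the firmware can tell a fresh snapshot from a stale one, then push the region to the device. A loose userspace analogue, with a C11 release-ordered increment standing in for the dma_sync_single_for_device() call (the real ordering guarantees come from the DMA API, not from this fence):

#include <stdatomic.h>
#include <stdint.h>

struct shared_state {
	uint8_t count[8][32];   /* per-tid, per-peer byte-count codes */
	atomic_uint seq;        /* bumped once per committed update */
};

static void commit(struct shared_state *s, int tid, int peer, uint8_t v)
{
	s->count[tid][peer] = v;
	/* release ordering: the data is visible before the new seq */
	atomic_fetch_add_explicit(&s->seq, 1, memory_order_release);
}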
+
+void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_recalc(hw, txq);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_sync(struct ath10k *ar)
+{
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_sync(ar);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ __ath10k_htt_tx_txq_recalc(hw, txq);
+ __ath10k_htt_tx_txq_sync(ar);
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
htt->num_pending_tx--;
if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}
-static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
- bool limit_mgmt_desc)
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
- spin_lock_bh(&htt->tx_lock);
- __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
- spin_unlock_bh(&htt->tx_lock);
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (htt->num_pending_tx >= htt->max_num_pending_tx)
+ return -EBUSY;
+
+ htt->num_pending_tx++;
+ if (htt->num_pending_tx == htt->max_num_pending_tx)
+ ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+
+ return 0;
}
-static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
- bool limit_mgmt_desc, bool is_probe_resp)
+int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
+ bool is_presp)
{
struct ath10k *ar = htt->ar;
- int ret = 0;
- spin_lock_bh(&htt->tx_lock);
+ lockdep_assert_held(&htt->tx_lock);
- if (htt->num_pending_tx >= htt->max_num_pending_tx) {
- ret = -EBUSY;
- goto exit;
- }
+ if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
+ return 0;
- if (limit_mgmt_desc) {
- if (is_probe_resp && (htt->num_pending_mgmt_tx >
- ar->hw_params.max_probe_resp_desc_thres)) {
- ret = -EBUSY;
- goto exit;
- }
- htt->num_pending_mgmt_tx++;
- }
+ if (is_presp &&
+ ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
+ return -EBUSY;
- htt->num_pending_tx++;
- if (htt->num_pending_tx == htt->max_num_pending_tx)
- ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
+ htt->num_pending_mgmt_tx++;
-exit:
- spin_unlock_bh(&htt->tx_lock);
- return ret;
+ return 0;
+}
+
+void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
+{
+ lockdep_assert_held(&htt->tx_lock);
+
+ if (!htt->ar->hw_params.max_probe_resp_desc_thres)
+ return;
+
+ htt->num_pending_mgmt_tx--;
}
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
@@ -137,7 +267,8 @@ static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
struct ath10k *ar = htt->ar;
size_t size;
- if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
return;
size = sizeof(*htt->tx_q_state.vaddr);
@@ -152,7 +283,8 @@ static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
size_t size;
int ret;
- if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+ if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
return 0;
htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
@@ -209,8 +341,18 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
goto free_frag_desc;
}
+ size = roundup_pow_of_two(htt->max_num_pending_tx);
+ ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
+ if (ret) {
+ ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
+ goto free_txq;
+ }
+
return 0;
+free_txq:
+ ath10k_htt_tx_free_txq(htt);
+
free_frag_desc:
ath10k_htt_tx_free_cont_frag_desc(htt);
@@ -234,8 +376,8 @@ static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
- tx_done.discard = 1;
tx_done.msdu_id = msdu_id;
+ tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
ath10k_txrx_tx_unref(htt, &tx_done);
@@ -258,6 +400,8 @@ void ath10k_htt_tx_free(struct ath10k_htt *htt)
ath10k_htt_tx_free_txq(htt);
ath10k_htt_tx_free_cont_frag_desc(htt);
+ WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
+ kfifo_free(&htt->txdone_fifo);
}
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
@@ -371,7 +515,8 @@ int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
info |= SM(htt->tx_q_state.type,
HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);
- if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;
cfg = &cmd->frag_desc_bank_cfg;
@@ -535,6 +680,55 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
return 0;
}
+int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
+ __le32 token,
+ __le16 fetch_seq_num,
+ struct htt_tx_fetch_record *records,
+ size_t num_records)
+{
+ struct sk_buff *skb;
+ struct htt_cmd *cmd;
+ const u16 resp_id = 0;
+ int len = 0;
+ int ret;
+
+	/* Response IDs are echoed back only for host driver convenience
+ * purposes. They aren't used for anything in the driver yet so use 0.
+ */
+
+ len += sizeof(cmd->hdr);
+ len += sizeof(cmd->tx_fetch_resp);
+ len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;
+
+ skb = ath10k_htc_alloc_skb(ar, len);
+ if (!skb)
+ return -ENOMEM;
+
+ skb_put(skb, len);
+ cmd = (struct htt_cmd *)skb->data;
+ cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
+ cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
+ cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
+ cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
+ cmd->tx_fetch_resp.token = token;
+
+ memcpy(cmd->tx_fetch_resp.records, records,
+ sizeof(records[0]) * num_records);
+
+ ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
+ goto err_free_skb;
+ }
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
@@ -576,20 +770,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
int msdu_id = -1;
int res;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
- bool limit_mgmt_desc = false;
- bool is_probe_resp = false;
-
- if (ar->hw_params.max_probe_resp_desc_thres) {
- limit_mgmt_desc = true;
-
- if (ieee80211_is_probe_resp(hdr->frame_control))
- is_probe_resp = true;
- }
-
- res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
-
- if (res)
- goto err;
len += sizeof(cmd->hdr);
len += sizeof(cmd->mgmt_tx);
@@ -598,7 +778,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0)
- goto err_tx_dec;
+ goto err;
msdu_id = res;
@@ -649,8 +829,6 @@ err_free_msdu_id:
spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
- ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
return res;
}
@@ -677,26 +855,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
u32 frags_paddr = 0;
u32 txbuf_paddr;
struct htt_msdu_ext_desc *ext_desc = NULL;
- bool limit_mgmt_desc = false;
- bool is_probe_resp = false;
-
- if (unlikely(ieee80211_is_mgmt(hdr->frame_control)) &&
- ar->hw_params.max_probe_resp_desc_thres) {
- limit_mgmt_desc = true;
-
- if (ieee80211_is_probe_resp(hdr->frame_control))
- is_probe_resp = true;
- }
-
- res = ath10k_htt_tx_inc_pending(htt, limit_mgmt_desc, is_probe_resp);
- if (res)
- goto err;
spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
spin_unlock_bh(&htt->tx_lock);
if (res < 0)
- goto err_tx_dec;
+ goto err;
msdu_id = res;
@@ -862,11 +1026,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
err_unmap_msdu:
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
- spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_free_msdu_id(htt, msdu_id);
- spin_unlock_bh(&htt->tx_lock);
-err_tx_dec:
- ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
err:
return res;
}
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 3e7d4806a..1f9774f7a 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -35,8 +35,6 @@
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
-#define QCA988X_HW_2_0_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA988X_HW_2_0_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
@@ -76,14 +74,10 @@ enum qca9377_chip_id_rev {
};
#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
-#define QCA6174_HW_2_1_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA6174_HW_2_1_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA6174_HW_2_1_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0"
-#define QCA6174_HW_3_0_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA6174_HW_3_0_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA6174_HW_3_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
@@ -94,23 +88,17 @@ enum qca9377_chip_id_rev {
#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000
#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1
#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0"
-#define QCA99X0_HW_2_0_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA99X0_HW_2_0_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA99X0_HW_2_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA9377 1.0 definitions */
#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
-#define QCA9377_HW_1_0_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA9377_HW_1_0_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA9377_HW_1_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA4019 1.0 definitions */
#define QCA4019_HW_1_0_DEV_VERSION 0x01000000
#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0"
-#define QCA4019_HW_1_0_FW_FILE "/*(DEBLOBBED)*/"
-#define QCA4019_HW_1_0_OTP_FILE "/*(DEBLOBBED)*/"
#define QCA4019_HW_1_0_BOARD_DATA_FILE "/*(DEBLOBBED)*/"
#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
@@ -134,8 +122,6 @@ enum qca9377_chip_id_rev {
#define REG_DUMP_COUNT_QCA988X 60
-#define QCA988X_CAL_DATA_LEN 2116
-
struct ath10k_fw_ie {
__le32 id;
__le32 len;
@@ -431,10 +417,14 @@ enum ath10k_hw_4addr_pad {
#define TARGET_10_4_ACTIVE_PEERS 0
#define TARGET_10_4_NUM_QCACHE_PEERS_MAX 512
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS 50
+#define TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC 35
#define TARGET_10_4_NUM_OFFLOAD_PEERS 0
#define TARGET_10_4_NUM_OFFLOAD_REORDER_BUFFS 0
#define TARGET_10_4_NUM_PEER_KEYS 2
#define TARGET_10_4_TGT_NUM_TIDS ((TARGET_10_4_NUM_PEERS) * 2)
+#define TARGET_10_4_NUM_MSDU_DESC (1024 + 400)
+#define TARGET_10_4_NUM_MSDU_DESC_PFC 2500
#define TARGET_10_4_AST_SKID_LIMIT 32
/* 100 ms for video, best-effort, and background */
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e11160ab4..4040f9413 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -157,6 +157,26 @@ ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
return 1;
}
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
+{
+ enum wmi_host_platform_type platform_type;
+ int ret;
+
+ if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
+ platform_type = WMI_HOST_PLATFORM_LOW_PERF;
+ else
+ platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
+
+ ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
+
+ if (ret && ret != -EOPNOTSUPP) {
+ ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
/**********/
/* Crypto */
/**********/
@@ -449,10 +469,10 @@ static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(peer, &ar->peers, list) {
- if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN))
+ if (ether_addr_equal(peer->addr, arvif->vif->addr))
continue;
- if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN))
+ if (ether_addr_equal(peer->addr, arvif->bssid))
continue;
if (peer->keys[key->keyidx] == key)
@@ -482,7 +502,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
enum wmi_phy_mode phymode = MODE_UNKNOWN;
switch (chandef->chan->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
@@ -505,7 +525,7 @@ chan_to_phymode(const struct cfg80211_chan_def *chandef)
break;
}
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
phymode = MODE_11A;
@@ -618,10 +638,15 @@ ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
*def = &conf->def;
}
-static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
+static int ath10k_peer_create(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ u32 vdev_id,
+ const u8 *addr,
enum wmi_peer_type peer_type)
{
struct ath10k_vif *arvif;
+ struct ath10k_peer *peer;
int num_peers = 0;
int ret;
@@ -650,6 +675,22 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr,
return ret;
}
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, vdev_id, addr);
+ if (!peer) {
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
+ addr, vdev_id);
+ ath10k_wmi_peer_delete(ar, vdev_id, addr);
+ return -ENOENT;
+ }
+
+ peer->vif = vif;
+ peer->sta = sta;
+
+ spin_unlock_bh(&ar->data_lock);
+
ar->num_peers++;
return 0;
@@ -731,6 +772,7 @@ static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_peer *peer, *tmp;
+ int peer_id;
lockdep_assert_held(&ar->conf_mutex);
@@ -742,6 +784,11 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
peer->addr, vdev_id);
+ for_each_set_bit(peer_id, peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS) {
+ ar->peer_map[peer_id] = NULL;
+ }
+
list_del(&peer->list);
kfree(peer);
ar->num_peers--;
@@ -1725,7 +1772,7 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
!test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
- ar->fw_features)) {
+ ar->running_fw->fw_file.fw_features)) {
ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
arvif->vdev_id);
enable_ps = false;
@@ -2013,7 +2060,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
}
if (sta->mfp &&
- test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, ar->fw_features)) {
+ test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
+ ar->running_fw->fw_file.fw_features)) {
arg->peer_flags |= ar->wmi.peer_flags->pmf;
}
}
@@ -2028,7 +2076,7 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
struct cfg80211_chan_def def;
const struct ieee80211_supported_band *sband;
const struct ieee80211_rate *rates;
- enum ieee80211_band band;
+ enum nl80211_band band;
u32 ratemask;
u8 rate;
int i;
@@ -2088,7 +2136,7 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
int i, n;
@@ -2312,7 +2360,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u16 *vht_mcs_mask;
u8 ampdu_factor;
@@ -2330,7 +2378,7 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
arg->peer_flags |= ar->wmi.peer_flags->vht;
- if (def.chan->band == IEEE80211_BAND_2GHZ)
+ if (def.chan->band == NL80211_BAND_2GHZ)
arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
arg->peer_vht_caps = vht_cap->cap;
@@ -2399,7 +2447,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
{
- return sta->supp_rates[IEEE80211_BAND_2GHZ] >>
+ return sta->supp_rates[NL80211_BAND_2GHZ] >>
ATH10K_MAC_FIRST_OFDM_RATE_IDX;
}
@@ -2410,7 +2458,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
@@ -2423,7 +2471,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
if (sta->vht_cap.vht_supported &&
!ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
@@ -2443,7 +2491,7 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
}
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
/*
* Check VHT first.
*/
@@ -2821,7 +2869,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_supported_band **bands;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_channel *channel;
struct wmi_scan_chan_list_arg arg = {0};
struct wmi_channel_arg *ch;
@@ -2833,7 +2881,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
bands = hw->wiphy->bands;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
@@ -2852,7 +2900,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
return -ENOMEM;
ch = arg.channels;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!bands[band])
continue;
@@ -2890,7 +2938,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
/* FIXME: why use only legacy modes, why not any
* HT/VHT modes? Would that even make any
* difference? */
- if (channel->band == IEEE80211_BAND_2GHZ)
+ if (channel->band == NL80211_BAND_2GHZ)
ch->mode = MODE_11G;
else
ch->mode = MODE_11A;
@@ -2994,6 +3042,13 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
/* TX handlers */
/***************/
+enum ath10k_mac_tx_path {
+ ATH10K_MAC_TX_HTT,
+ ATH10K_MAC_TX_HTT_MGMT,
+ ATH10K_MAC_TX_WMI_MGMT,
+ ATH10K_MAC_TX_UNKNOWN,
+};
+
void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
{
lockdep_assert_held(&ar->htt.tx_lock);
@@ -3153,7 +3208,8 @@ ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
*/
if (ar->htt.target_version_major < 3 &&
(ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
- !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features))
+ !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
+ ar->running_fw->fw_file.fw_features))
return ATH10K_HW_TXRX_MGMT;
/* Workaround:
@@ -3271,6 +3327,28 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
}
}
+static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_txq *txq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
+
+ cb->flags = 0;
+ if (!ath10k_tx_h_use_hwcrypto(vif, skb))
+ cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
+
+ if (ieee80211_is_mgmt(hdr->frame_control))
+ cb->flags |= ATH10K_SKB_F_MGMT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ cb->flags |= ATH10K_SKB_F_QOS;
+
+ cb->vif = vif;
+ cb->txq = txq;
+}
+
bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
{
/* FIXME: Not really sure since when the behaviour changed. At some
@@ -3281,7 +3359,7 @@ bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
*/
return (ar->htt.target_version_major >= 3 &&
ar->htt.target_version_minor >= 4 &&
- ar->htt.op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
+ ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
}
static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
@@ -3306,26 +3384,50 @@ unlock:
return ret;
}
-static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode,
- struct sk_buff *skb)
+static enum ath10k_mac_tx_path
+ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
+ struct sk_buff *skb,
+ enum ath10k_hw_txrx_mode txmode)
{
- struct ath10k_htt *htt = &ar->htt;
- int ret = 0;
-
switch (txmode) {
case ATH10K_HW_TXRX_RAW:
case ATH10K_HW_TXRX_NATIVE_WIFI:
case ATH10K_HW_TXRX_ETHERNET:
- ret = ath10k_htt_tx(htt, txmode, skb);
- break;
+ return ATH10K_MAC_TX_HTT;
case ATH10K_HW_TXRX_MGMT:
if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
- ar->fw_features))
- ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ ar->running_fw->fw_file.fw_features))
+ return ATH10K_MAC_TX_WMI_MGMT;
else if (ar->htt.target_version_major >= 3)
- ret = ath10k_htt_tx(htt, txmode, skb);
+ return ATH10K_MAC_TX_HTT;
else
- ret = ath10k_htt_mgmt_tx(htt, skb);
+ return ATH10K_MAC_TX_HTT_MGMT;
+ }
+
+ return ATH10K_MAC_TX_UNKNOWN;
+}
+
+static int ath10k_mac_tx_submit(struct ath10k *ar,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+ struct sk_buff *skb)
+{
+ struct ath10k_htt *htt = &ar->htt;
+ int ret = -EINVAL;
+
+ switch (txpath) {
+ case ATH10K_MAC_TX_HTT:
+ ret = ath10k_htt_tx(htt, txmode, skb);
+ break;
+ case ATH10K_MAC_TX_HTT_MGMT:
+ ret = ath10k_htt_mgmt_tx(htt, skb);
+ break;
+ case ATH10K_MAC_TX_WMI_MGMT:
+ ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
+ break;
+ case ATH10K_MAC_TX_UNKNOWN:
+ WARN_ON_ONCE(1);
+ ret = -EINVAL;
break;
}
@@ -3334,6 +3436,64 @@ static void ath10k_mac_tx(struct ath10k *ar, enum ath10k_hw_txrx_mode txmode,
ret);
ieee80211_free_txskb(ar->hw, skb);
}
+
+ return ret;
+}
+
+/* This function consumes the sk_buff regardless of return value as far as
+ * caller is concerned so no freeing is necessary afterwards.
+ */
+static int ath10k_mac_tx(struct ath10k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ath10k_hw_txrx_mode txmode,
+ enum ath10k_mac_tx_path txpath,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ int ret;
+
+ /* We should disable CCK RATE due to P2P */
+ if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+
+ switch (txmode) {
+ case ATH10K_HW_TXRX_MGMT:
+ case ATH10K_HW_TXRX_NATIVE_WIFI:
+ ath10k_tx_h_nwifi(hw, skb);
+ ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
+ ath10k_tx_h_seq_no(vif, skb);
+ break;
+ case ATH10K_HW_TXRX_ETHERNET:
+ ath10k_tx_h_8023(skb);
+ break;
+ case ATH10K_HW_TXRX_RAW:
+ if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
+ WARN_ON_ONCE(1);
+ ieee80211_free_txskb(hw, skb);
+ return -ENOTSUPP;
+ }
+ }
+
+ if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
+ if (!ath10k_mac_tx_frm_has_freq(ar)) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
+ skb);
+
+ skb_queue_tail(&ar->offchan_tx_queue, skb);
+ ieee80211_queue_work(hw, &ar->offchan_tx_work);
+ return 0;
+ }
+ }
+
+ ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to submit frame: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
}
void ath10k_offchan_tx_purge(struct ath10k *ar)
@@ -3354,12 +3514,13 @@ void ath10k_offchan_tx_work(struct work_struct *work)
struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
struct ieee80211_hdr *hdr;
struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
struct sk_buff *skb;
const u8 *peer_addr;
- enum ath10k_hw_txrx_mode txmode;
int vdev_id;
int ret;
unsigned long time_left;
@@ -3396,7 +3557,8 @@ void ath10k_offchan_tx_work(struct work_struct *work)
peer_addr, vdev_id);
if (!peer) {
- ret = ath10k_peer_create(ar, vdev_id, peer_addr,
+ ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
+ peer_addr,
WMI_PEER_TYPE_DEFAULT);
if (ret)
ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
@@ -3423,8 +3585,14 @@ void ath10k_offchan_tx_work(struct work_struct *work)
}
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
- ath10k_mac_tx(ar, txmode, skb);
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
+ ret);
+ /* not serious */
+ }
time_left =
wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
@@ -3476,6 +3644,175 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
}
}
+static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+ if (!txq)
+ return;
+
+ INIT_LIST_HEAD(&artxq->list);
+}
+
+static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
+{
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ath10k_skb_cb *cb;
+ struct sk_buff *msdu;
+ int msdu_id;
+
+ if (!txq)
+ return;
+
+ spin_lock_bh(&ar->txqs_lock);
+ if (!list_empty(&artxq->list))
+ list_del_init(&artxq->list);
+ spin_unlock_bh(&ar->txqs_lock);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
+ cb = ATH10K_SKB_CB(msdu);
+ if (cb->txq == txq)
+ cb->txq = NULL;
+ }
+ spin_unlock_bh(&ar->htt.tx_lock);
+}
+
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid)
+{
+ struct ath10k_peer *peer;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ peer = ar->peer_map[peer_id];
+ if (!peer)
+ return NULL;
+
+ if (peer->sta)
+ return peer->sta->txq[tid];
+ else if (peer->vif)
+ return peer->vif->txq;
+ else
+ return NULL;
+}
+
+static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
+ /* No need to get locks */
+
+ if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
+ return true;
+
+ if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
+ return true;
+
+ if (artxq->num_fw_queued < artxq->num_push_allowed)
+ return true;
+
+ return false;
+}
+
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_htt *htt = &ar->htt;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+ struct ieee80211_vif *vif = txq->vif;
+ struct ieee80211_sta *sta = txq->sta;
+ enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ struct sk_buff *skb;
+ size_t skb_len;
+ int ret;
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ret = ath10k_htt_tx_inc_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ if (ret)
+ return ret;
+
+ skb = ieee80211_tx_dequeue(hw, txq);
+ if (!skb) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return -ENOENT;
+ }
+
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
+
+ skb_len = skb->len;
+ txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (unlikely(ret)) {
+ ath10k_warn(ar, "failed to push frame: %d\n", ret);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return ret;
+ }
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ artxq->num_fw_queued++;
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ return skb_len;
+}
+
+void ath10k_mac_tx_push_pending(struct ath10k *ar)
+{
+ struct ieee80211_hw *hw = ar->hw;
+ struct ieee80211_txq *txq;
+ struct ath10k_txq *artxq;
+ struct ath10k_txq *last;
+ int ret;
+ int max;
+
+ spin_lock_bh(&ar->txqs_lock);
+ rcu_read_lock();
+
+ last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
+ while (!list_empty(&ar->txqs)) {
+ artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
+ txq = container_of((void *)artxq, struct ieee80211_txq,
+ drv_priv);
+
+ /* Prevent aggressive sta/tid taking over tx queue */
+ max = 16;
+ ret = 0;
+ while (ath10k_mac_tx_can_push(hw, txq) && max--) {
+ ret = ath10k_mac_tx_push_txq(hw, txq);
+ if (ret < 0)
+ break;
+ }
+
+ list_del_init(&artxq->list);
+ if (ret != -ENOENT)
+ list_add_tail(&artxq->list, &ar->txqs);
+
+ ath10k_htt_tx_txq_update(hw, txq);
+
+ if (artxq == last || (ret < 0 && ret != -ENOENT))
+ break;
+ }
+
+ rcu_read_unlock();
+ spin_unlock_bh(&ar->txqs_lock);
+}
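
ath10k_mac_tx_push_pending() walks the txq list round-robin: each queue may push at most a fixed burst (16 frames) per turn before the next queue gets a chance, and a queue that runs empty (-ENOENT in the driver) drops out of the rotation. An illustrative userspace reduction of that fairness loop, with the push/pull gating stubbed out:

#include <stdbool.h>

#define MAX_BURST 16

struct txq { int backlog; };

static bool can_push(void)
{
	return true; /* push/pull mode gating elided */
}

static int push_one(struct txq *q)
{
	if (!q->backlog)
		return -1;      /* queue is empty */
	q->backlog--;
	return 0;
}

static void push_pending(struct txq *qs, int n)
{
	for (int i = 0; i < n; i++) {       /* one bounded turn per queue */
		int burst = MAX_BURST;

		while (can_push() && burst--)
			if (push_one(&qs[i]) < 0)
				break;      /* empty: next queue's turn */
	}
}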
+
/************/
/* Scanning */
/************/
@@ -3531,7 +3868,7 @@ static int ath10k_scan_stop(struct ath10k *ar)
goto out;
}
- ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ);
+ ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
if (ret == 0) {
ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
ret = -ETIMEDOUT;
@@ -3611,7 +3948,7 @@ static int ath10k_start_scan(struct ath10k *ar,
if (ret)
return ret;
- ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ);
+ ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
if (ret == 0) {
ret = ath10k_scan_stop(ar);
if (ret)
@@ -3638,66 +3975,86 @@ static int ath10k_start_scan(struct ath10k *ar,
/* mac80211 callbacks */
/**********************/
-static void ath10k_tx(struct ieee80211_hw *hw,
- struct ieee80211_tx_control *control,
- struct sk_buff *skb)
+static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
{
struct ath10k *ar = hw->priv;
- struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
+ struct ath10k_htt *htt = &ar->htt;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct ieee80211_sta *sta = control->sta;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_txq *txq = NULL;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
enum ath10k_hw_txrx_mode txmode;
+ enum ath10k_mac_tx_path txpath;
+ bool is_htt;
+ bool is_mgmt;
+ bool is_presp;
+ int ret;
- /* We should disable CCK RATE due to P2P */
- if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
- ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
+ ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
+ txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
+ is_htt = (txpath == ATH10K_MAC_TX_HTT ||
+ txpath == ATH10K_MAC_TX_HTT_MGMT);
+ is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
- skb_cb->flags = 0;
- if (!ath10k_tx_h_use_hwcrypto(vif, skb))
- skb_cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
-
- if (ieee80211_is_mgmt(hdr->frame_control))
- skb_cb->flags |= ATH10K_SKB_F_MGMT;
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ is_presp = ieee80211_is_probe_resp(hdr->frame_control);
- if (ieee80211_is_data_qos(hdr->frame_control))
- skb_cb->flags |= ATH10K_SKB_F_QOS;
-
- skb_cb->vif = vif;
+ ret = ath10k_htt_tx_inc_pending(htt);
+ if (ret) {
+ ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
+ ret);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
+ return;
+ }
- switch (txmode) {
- case ATH10K_HW_TXRX_MGMT:
- case ATH10K_HW_TXRX_NATIVE_WIFI:
- ath10k_tx_h_nwifi(hw, skb);
- ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
- ath10k_tx_h_seq_no(vif, skb);
- break;
- case ATH10K_HW_TXRX_ETHERNET:
- ath10k_tx_h_8023(skb);
- break;
- case ATH10K_HW_TXRX_RAW:
- if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
- WARN_ON_ONCE(1);
- ieee80211_free_txskb(hw, skb);
+ ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
+ if (ret) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
+ ret);
+ ath10k_htt_tx_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
+ ieee80211_free_txskb(ar->hw, skb);
return;
}
+ spin_unlock_bh(&ar->htt.tx_lock);
}
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
- if (!ath10k_mac_tx_frm_has_freq(ar)) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n",
- skb);
-
- skb_queue_tail(&ar->offchan_tx_queue, skb);
- ieee80211_queue_work(hw, &ar->offchan_tx_work);
- return;
+ ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
+ if (ret) {
+ ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
+ if (is_htt) {
+ spin_lock_bh(&ar->htt.tx_lock);
+ ath10k_htt_tx_dec_pending(htt);
+ if (is_mgmt)
+ ath10k_htt_tx_mgmt_dec_pending(htt);
+ spin_unlock_bh(&ar->htt.tx_lock);
}
+ return;
}
+}
- ath10k_mac_tx(ar, txmode, skb);
+static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_txq *artxq = (void *)txq->drv_priv;
+
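+ /* Only enqueue the txq here; actual pushing happens from the htt
+ * txrx completion tasklet, which is kicked below if the firmware
+ * can accept more frames right away.
+ */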
+ spin_lock_bh(&ar->txqs_lock);
+ if (list_empty(&artxq->list))
+ list_add_tail(&artxq->list, &ar->txqs);
+ spin_unlock_bh(&ar->txqs_lock);
+
+ if (ath10k_mac_tx_can_push(hw, txq))
+ tasklet_schedule(&ar->htt.txrx_compl_task);
+
+ ath10k_htt_tx_txq_update(hw, txq);
}
/* Must not be called with conf_mutex held as workers can use that also. */
@@ -3919,14 +4276,11 @@ static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
vht_cap = ath10k_create_vht_cap(ar);
if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
- band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->ht_cap = ht_cap;
-
- /* Enable the VHT support at 2.4 GHz */
- band->vht_cap = vht_cap;
}
if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
- band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->ht_cap = ht_cap;
band->vht_cap = vht_cap;
}
@@ -3989,7 +4343,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
/*
* This makes sense only when restarting hw. It is harmless to call
- * uncoditionally. This is necessary to make sure no HTT/WMI tx
+ * unconditionally. This is necessary to make sure no HTT/WMI tx
* commands will be submitted while restarting.
*/
ath10k_drain_tx(ar);
@@ -4021,7 +4375,8 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_off;
}
- ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+ &ar->normal_mode_fw);
if (ret) {
ath10k_err(ar, "Could not init core: %d\n", ret);
goto err_power_down;
@@ -4079,7 +4434,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
- ar->fw_features)) {
+ ar->running_fw->fw_file.fw_features)) {
ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
WMI_CCA_DETECT_LEVEL_AUTO,
WMI_CCA_DETECT_MARGIN_AUTO);
@@ -4100,7 +4455,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
ar->ani_enabled = true;
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+ if (ath10k_peer_stats_enabled(ar)) {
param = ar->wmi.pdev_param->peer_stats_update_period;
ret = ath10k_wmi_pdev_set_param(ar, param,
PEER_DEFAULT_STATS_UPDATE_PERIOD);
@@ -4313,6 +4668,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_peer *peer;
enum wmi_sta_powersave_param param;
int ret = 0;
u32 value;
@@ -4325,6 +4681,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
memset(arvif, 0, sizeof(*arvif));
+ ath10k_mac_txq_init(vif->txq);
arvif->ar = ar;
arvif->vif = vif;
@@ -4508,13 +4865,31 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
- ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr,
- WMI_PEER_TYPE_DEFAULT);
+ ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
+ vif->addr, WMI_PEER_TYPE_DEFAULT);
if (ret) {
ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
arvif->vdev_id, ret);
goto err_vdev_delete;
}
+
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
+ if (!peer) {
+ ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+ vif->addr, arvif->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ ret = -ENOENT;
+ goto err_peer_delete;
+ }
+
+ arvif->peer_id = find_first_bit(peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS);
+
+ spin_unlock_bh(&ar->data_lock);
+ } else {
+ arvif->peer_id = HTT_INVALID_PEERID;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
@@ -4625,7 +5000,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ struct ath10k_peer *peer;
int ret;
+ int i;
cancel_work_sync(&arvif->ap_csa_work);
cancel_delayed_work_sync(&arvif->connection_loss_work);
@@ -4679,7 +5056,22 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
}
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ peer = ar->peer_map[i];
+ if (!peer)
+ continue;
+
+ if (peer->vif == vif) {
+ ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
+ vif->addr, arvif->vdev_id);
+ peer->vif = NULL;
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
ath10k_peer_cleanup(ar, arvif->vdev_id);
+ ath10k_mac_txq_unref(ar, vif->txq);
if (vif->type == NL80211_IFTYPE_MONITOR) {
ar->monitor_arvif = NULL;
@@ -4692,6 +5084,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_mac_vif_tx_unlock_all(arvif);
spin_unlock_bh(&ar->htt.tx_lock);
+ ath10k_mac_txq_unref(ar, vif->txq);
+
mutex_unlock(&ar->conf_mutex);
}
@@ -5221,7 +5615,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
struct ath10k_sta *arsta;
struct ieee80211_sta *sta;
struct cfg80211_chan_def def;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
u32 changed, bw, nss, smps;
@@ -5396,13 +5790,18 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+ struct ath10k_peer *peer;
int ret = 0;
+ int i;
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ ath10k_mac_txq_init(sta->txq[i]);
}
/* cancel must be done outside the mutex to avoid deadlock */
@@ -5437,8 +5836,8 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
if (sta->tdls)
peer_type = WMI_PEER_TYPE_TDLS;
- ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr,
- peer_type);
+ ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
+ sta->addr, peer_type);
if (ret) {
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
@@ -5446,6 +5845,24 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
goto exit;
}
+ spin_lock_bh(&ar->data_lock);
+
+ peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
+ if (!peer) {
+ ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
+ sta->addr, arvif->vdev_id);
+ spin_unlock_bh(&ar->data_lock);
+ ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
+ ath10k_mac_dec_num_stations(arvif, sta);
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ arsta->peer_id = find_first_bit(peer->peer_ids,
+ ATH10K_MAX_NUM_PEER_IDS);
+
+ spin_unlock_bh(&ar->data_lock);
+
if (!sta->tdls)
goto exit;
@@ -5508,6 +5925,23 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_mac_dec_num_stations(arvif, sta);
+ spin_lock_bh(&ar->data_lock);
+ for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
+ peer = ar->peer_map[i];
+ if (!peer)
+ continue;
+
+ if (peer->sta == sta) {
+ ath10k_warn(ar, "found sta peer %pM entry on vdev %i after it was supposedly removed\n",
+ sta->addr, arvif->vdev_id);
+ peer->sta = NULL;
+ }
+ }
+ spin_unlock_bh(&ar->data_lock);
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ ath10k_mac_txq_unref(ar, sta->txq[i]);
+
if (!sta->tdls)
goto exit;
@@ -5754,7 +6188,7 @@ exit:
return ret;
}
-#define ATH10K_ROC_TIMEOUT_HZ (2*HZ)
+#define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -5818,7 +6252,7 @@ static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
goto exit;
}
- ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ);
+ ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
if (ret == 0) {
ath10k_warn(ar, "failed to switch to channel for roc scan\n");
@@ -5970,6 +6404,39 @@ static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
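+/* Poll firmware for fresh survey data on the current operating channel
+ * (requires WMI_SERVICE_BSS_CHANNEL_INFO_64); a no-op while scanning or
+ * when the requested channel is not the one we are receiving on.
+ */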
+static void
+ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
+ struct ieee80211_channel *channel)
+{
+ int ret;
+ enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
+ (ar->rx_channel != channel))
+ return;
+
+ if (ar->scan.state != ATH10K_SCAN_IDLE) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning\n");
+ return;
+ }
+
+ reinit_completion(&ar->bss_survey_done);
+
+ ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
+ if (ret) {
+ ath10k_warn(ar, "failed to send pdev bss chan info request\n");
+ return;
+ }
+
+ ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
+ if (!ret) {
+ ath10k_warn(ar, "bss channel survey timed out\n");
+ return;
+ }
+}
+
static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
@@ -5980,20 +6447,22 @@ static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
mutex_lock(&ar->conf_mutex);
- sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
sband = NULL;
}
if (!sband)
- sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels) {
ret = -ENOENT;
goto exit;
}
+ ath10k_mac_update_bss_chan_survey(ar, survey->channel);
+
spin_lock_bh(&ar->data_lock);
memcpy(survey, ar_survey, sizeof(*survey));
spin_unlock_bh(&ar->data_lock);
@@ -6010,7 +6479,7 @@ exit:
static bool
ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int num_rates = 0;
@@ -6029,7 +6498,7 @@ ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
static bool
ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
int *nss)
{
@@ -6078,7 +6547,7 @@ ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
static int
ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask,
u8 *rate, u8 *nss)
{
@@ -6179,7 +6648,7 @@ static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
static bool
ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
- enum ieee80211_band band,
+ enum nl80211_band band,
const struct cfg80211_bitrate_mask *mask)
{
int i;
@@ -6231,7 +6700,7 @@ static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct cfg80211_chan_def def;
struct ath10k *ar = arvif->ar;
- enum ieee80211_band band;
+ enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
u8 rate;
@@ -6382,6 +6851,32 @@ static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return 0;
}
+static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u64 tsf)
+{
+ struct ath10k *ar = hw->priv;
+ struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
+ u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
+ int ret;
+
+ /* Workaround:
+ *
+ * The tsf argument is an absolute TSF value, but the firmware
+ * accepts only an offset relative to the current TSF.
+ *
+ * get_tsf would normally supply the current value, but since
+ * ath10k_get_tsf is not implemented properly it always returns 0.
+ * Luckily, all current callers of set_tsf also derive their tsf
+ * argument via get_tsf (as get_tsf() + tsf_delta), so the final
+ * offset handed to the firmware is still arithmetically correct.
+ */
+ tsf_offset = tsf - ath10k_get_tsf(hw, vif);
+ ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
+ vdev_param, tsf_offset);
+ if (ret && ret != -EOPNOTSUPP)
+ ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
+}
+
static int ath10k_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
@@ -6816,7 +7311,8 @@ ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
}
static const struct ieee80211_ops ath10k_ops = {
- .tx = ath10k_tx,
+ .tx = ath10k_mac_op_tx,
+ .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
.start = ath10k_start,
.stop = ath10k_stop,
.config = ath10k_config,
@@ -6843,6 +7339,7 @@ static const struct ieee80211_ops ath10k_ops = {
.set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
+ .set_tsf = ath10k_set_tsf,
.ampdu_action = ath10k_ampdu_action,
.get_et_sset_count = ath10k_debug_get_et_sset_count,
.get_et_stats = ath10k_debug_get_et_stats,
@@ -6866,7 +7363,7 @@ static const struct ieee80211_ops ath10k_ops = {
};
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
@@ -6875,7 +7372,7 @@ static const struct ieee80211_ops ath10k_ops = {
}
#define CHAN5G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
@@ -7195,13 +7692,13 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
- band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_2GHZ];
band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
band->channels = channels;
band->n_bitrates = ath10k_g_rates_size;
band->bitrates = ath10k_g_rates;
- ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
}
if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
@@ -7213,12 +7710,12 @@ int ath10k_mac_register(struct ath10k *ar)
goto err_free;
}
- band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
+ band = &ar->mac.sbands[NL80211_BAND_5GHZ];
band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
band->channels = channels;
band->n_bitrates = ath10k_a_rates_size;
band->bitrates = ath10k_a_rates;
- ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
}
ath10k_mac_setup_ht_vht_cap(ar);
@@ -7231,7 +7728,7 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
- if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
+ if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
ar->hw->wiphy->interface_modes |=
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -7271,6 +7768,7 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->vif_data_size = sizeof(struct ath10k_vif);
ar->hw->sta_data_size = sizeof(struct ath10k_sta);
+ ar->hw->txq_data_size = sizeof(struct ath10k_txq);
ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
@@ -7295,7 +7793,8 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->max_remain_on_channel_duration = 5000;
ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
- ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE;
+ ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
+ NL80211_FEATURE_AP_SCAN;
ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
@@ -7319,7 +7818,7 @@ int ath10k_mac_register(struct ath10k *ar)
*/
ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
- switch (ar->wmi.op_version) {
+ switch (ar->running_fw->fw_file.wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_MAIN:
ar->hw->wiphy->iface_combinations = ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations =
@@ -7404,8 +7903,8 @@ err_dfs_detector_exit:
ar->dfs_detector->exit(ar->dfs_detector);
err_free:
- kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
- kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
SET_IEEE80211_DEV(ar->hw, NULL);
return ret;
@@ -7418,8 +7917,8 @@ void ath10k_mac_unregister(struct ath10k *ar)
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
ar->dfs_detector->exit(ar->dfs_detector);
- kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
- kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
SET_IEEE80211_DEV(ar->hw, NULL);
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 530915880..1bd29ecfc 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -75,6 +75,13 @@ void ath10k_mac_tx_unlock(struct ath10k *ar, int reason);
void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason);
void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason);
bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar);
+void ath10k_mac_tx_push_pending(struct ath10k *ar);
+int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq);
+struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
+ u16 peer_id,
+ u8 tid);
+int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index f21f09cce..d2674ade4 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -33,12 +33,6 @@
#include "ce.h"
#include "pci.h"
-enum ath10k_pci_irq_mode {
- ATH10K_PCI_IRQ_AUTO = 0,
- ATH10K_PCI_IRQ_LEGACY = 1,
- ATH10K_PCI_IRQ_MSI = 2,
-};
-
enum ath10k_pci_reset_mode {
ATH10K_PCI_RESET_AUTO = 0,
ATH10K_PCI_RESET_WARM_ONLY = 1,
@@ -745,10 +739,7 @@ static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- if (ar_pci->num_msi_intrs > 1)
- return "msi-x";
-
- if (ar_pci->num_msi_intrs == 1)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_MSI)
return "msi";
return "legacy";
@@ -809,7 +800,8 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
spin_lock_bh(&ar_pci->ce_lock);
num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
spin_unlock_bh(&ar_pci->ce_lock);
- while (num--) {
+
+ while (num >= 0) {
ret = __ath10k_pci_rx_post_buf(pipe);
if (ret) {
if (ret == -ENOSPC)
@@ -819,6 +811,7 @@ static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
ATH10K_PCI_RX_POST_RETRY_MS);
break;
}
+ num--;
}
}
@@ -870,10 +863,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
- u32 buf;
+ u32 *buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
- unsigned int id;
- unsigned int flags;
struct ath10k_ce_pipe *ce_diag;
/* Host buffer address in CE space */
u32 ce_data;
@@ -909,7 +900,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
- ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, &ce_data, ce_data);
if (ret != 0)
goto done;
@@ -940,9 +931,10 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -956,7 +948,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
}
- if (buf != ce_data) {
+ if (*buf != ce_data) {
ret = -EIO;
goto done;
}
@@ -1026,10 +1018,8 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret = 0;
- u32 buf;
+ u32 *buf;
unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
- unsigned int id;
- unsigned int flags;
struct ath10k_ce_pipe *ce_diag;
void *data_buf = NULL;
u32 ce_data; /* Host buffer address in CE space */
@@ -1078,7 +1068,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
- ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
+ ret = __ath10k_ce_rx_post_buf(ce_diag, &address, address);
if (ret != 0)
goto done;
@@ -1103,9 +1093,10 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
}
i = 0;
- while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
- &completed_nbytes,
- &id, &flags) != 0) {
+ while (ath10k_ce_completed_recv_next_nolock(ce_diag,
+ (void **)&buf,
+ &completed_nbytes)
+ != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1119,7 +1110,7 @@ int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
}
- if (buf != address) {
+ if (*buf != address) {
ret = -EIO;
goto done;
}
@@ -1181,15 +1172,11 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
struct sk_buff *skb;
struct sk_buff_head list;
void *transfer_context;
- u32 ce_data;
unsigned int nbytes, max_nbytes;
- unsigned int transfer_id;
- unsigned int flags;
__skb_queue_head_init(&list);
while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
- &ce_data, &nbytes, &transfer_id,
- &flags) == 0) {
+ &nbytes) == 0) {
skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
@@ -1218,6 +1205,63 @@ static void ath10k_pci_process_rx_cb(struct ath10k_ce_pipe *ce_state,
ath10k_pci_rx_post_pipe(pipe_info);
}
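+/* HTT rx (CE5) completion processing. Unlike the generic rx callback the
+ * skbs are recycled in place: the payload is delivered synchronously via
+ * callback(), then each buffer is reset and synced back to the device,
+ * avoiding an unmap/alloc/map cycle per frame.
+ */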
+static void ath10k_pci_process_htt_rx_cb(struct ath10k_ce_pipe *ce_state,
+ void (*callback)(struct ath10k *ar,
+ struct sk_buff *skb))
+{
+ struct ath10k *ar = ce_state->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
+ struct ath10k_ce_pipe *ce_pipe = pipe_info->ce_hdl;
+ struct sk_buff *skb;
+ struct sk_buff_head list;
+ void *transfer_context;
+ unsigned int nbytes, max_nbytes, nentries;
+ int orig_len;
+
+ /* No need to acquire ce_lock for CE5, since this is the only place CE5
+ * is processed other than init and deinit. Before releasing CE5
+ * buffers, interrupts are disabled. Thus CE5 access is serialized.
+ */
+ __skb_queue_head_init(&list);
+ while (ath10k_ce_completed_recv_next_nolock(ce_state, &transfer_context,
+ &nbytes) == 0) {
+ skb = transfer_context;
+ max_nbytes = skb->len + skb_tailroom(skb);
+
+ if (unlikely(max_nbytes < nbytes)) {
+ ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)\n",
+ nbytes, max_nbytes);
+ continue;
+ }
+
+ dma_sync_single_for_cpu(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ max_nbytes, DMA_FROM_DEVICE);
+ skb_put(skb, nbytes);
+ __skb_queue_tail(&list, skb);
+ }
+
+ nentries = skb_queue_len(&list);
+ while ((skb = __skb_dequeue(&list))) {
+ ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
+ ce_state->id, skb->len);
+ ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
+ skb->data, skb->len);
+
+ orig_len = skb->len;
+ callback(ar, skb);
+ skb_push(skb, orig_len - skb->len);
+ skb_reset_tail_pointer(skb);
+ skb_trim(skb, 0);
+
+ /* let device gain the buffer again */
+ dma_sync_single_for_device(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+ }
+ ath10k_ce_rx_update_write_idx(ce_pipe, nentries);
+}
+
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_htc_rx_cb(struct ath10k_ce_pipe *ce_state)
{
@@ -1274,7 +1318,7 @@ static void ath10k_pci_htt_rx_cb(struct ath10k_ce_pipe *ce_state)
*/
ath10k_ce_per_engine_service(ce_state->ar, 4);
- ath10k_pci_process_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
+ ath10k_pci_process_htt_rx_cb(ce_state, ath10k_pci_htt_rx_deliver);
}
int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
@@ -1449,13 +1493,8 @@ void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
tasklet_kill(&ar_pci->intr_tq);
- tasklet_kill(&ar_pci->msi_fw_err);
-
- for (i = 0; i < CE_COUNT; i++)
- tasklet_kill(&ar_pci->pipe_info[i].intr);
del_timer_sync(&ar_pci->rx_post_retry);
}
@@ -1571,10 +1610,8 @@ static void ath10k_pci_irq_disable(struct ath10k *ar)
static void ath10k_pci_irq_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- synchronize_irq(ar_pci->pdev->irq + i);
+ synchronize_irq(ar_pci->pdev->irq);
}
static void ath10k_pci_irq_enable(struct ath10k *ar)
@@ -1835,13 +1872,10 @@ static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct bmi_xfer *xfer;
- u32 ce_data;
unsigned int nbytes;
- unsigned int transfer_id;
- unsigned int flags;
- if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
- &nbytes, &transfer_id, &flags))
+ if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer,
+ &nbytes))
return;
if (WARN_ON_ONCE(!xfer))
@@ -2546,65 +2580,6 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
#endif
};
-static void ath10k_pci_ce_tasklet(unsigned long ptr)
-{
- struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
- struct ath10k_pci *ar_pci = pipe->ar_pci;
-
- ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
-}
-
-static void ath10k_msi_err_tasklet(unsigned long data)
-{
- struct ath10k *ar = (struct ath10k *)data;
-
- if (!ath10k_pci_has_fw_crashed(ar)) {
- ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
- return;
- }
-
- ath10k_pci_irq_disable(ar);
- ath10k_pci_fw_crashed_clear(ar);
- ath10k_pci_fw_crashed_dump(ar);
-}
-
-/*
- * Handler for a per-engine interrupt on a PARTICULAR CE.
- * This is used in cases where each CE has a private MSI interrupt.
- */
-static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
-{
- struct ath10k *ar = arg;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
-
- if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
- ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
- ce_id);
- return IRQ_HANDLED;
- }
-
- /*
- * NOTE: We are able to derive ce_id from irq because we
- * use a one-to-one mapping for CE's 0..5.
- * CE's 6 & 7 do not use interrupts at all.
- *
- * This mapping must be kept in sync with the mapping
- * used by firmware.
- */
- tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
-{
- struct ath10k *ar = arg;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- tasklet_schedule(&ar_pci->msi_fw_err);
- return IRQ_HANDLED;
-}
-
/*
* Top-level interrupt handler for all PCI interrupts from a Target.
* When a block of MSI interrupts is allocated, this top-level handler
@@ -2622,7 +2597,7 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_NONE;
}
- if (ar_pci->num_msi_intrs == 0) {
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) {
if (!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
@@ -2649,43 +2624,10 @@ static void ath10k_pci_tasklet(unsigned long data)
ath10k_ce_per_engine_service_any(ar);
/* Re-enable legacy irq that was disabled in the irq handler */
- if (ar_pci->num_msi_intrs == 0)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
ath10k_pci_enable_legacy_irq(ar);
}
-static int ath10k_pci_request_irq_msix(struct ath10k *ar)
-{
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int ret, i;
-
- ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
- ath10k_pci_msi_fw_handler,
- IRQF_SHARED, "ath10k_pci", ar);
- if (ret) {
- ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
- ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
- return ret;
- }
-
- for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
- ret = request_irq(ar_pci->pdev->irq + i,
- ath10k_pci_per_engine_handler,
- IRQF_SHARED, "ath10k_pci", ar);
- if (ret) {
- ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
- ar_pci->pdev->irq + i, ret);
-
- for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
- free_irq(ar_pci->pdev->irq + i, ar);
-
- free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
- return ret;
- }
- }
-
- return 0;
-}
-
static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -2724,41 +2666,28 @@ static int ath10k_pci_request_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- switch (ar_pci->num_msi_intrs) {
- case 0:
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_LEGACY:
return ath10k_pci_request_irq_legacy(ar);
- case 1:
+ case ATH10K_PCI_IRQ_MSI:
return ath10k_pci_request_irq_msi(ar);
default:
- return ath10k_pci_request_irq_msix(ar);
+ return -EINVAL;
}
}
static void ath10k_pci_free_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
- /* There's at least one interrupt irregardless whether its legacy INTR
- * or MSI or MSI-X */
- for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
- free_irq(ar_pci->pdev->irq + i, ar);
+ free_irq(ar_pci->pdev->irq, ar);
}
void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- int i;
tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
- tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
- (unsigned long)ar);
-
- for (i = 0; i < CE_COUNT; i++) {
- ar_pci->pipe_info[i].ar_pci = ar_pci;
- tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
- (unsigned long)&ar_pci->pipe_info[i]);
- }
}
static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -2772,20 +2701,9 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
ath10k_info(ar, "limiting irq mode to: %d\n",
ath10k_pci_irq_mode);
- /* Try MSI-X */
- if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
- ar_pci->num_msi_intrs = MSI_ASSIGN_CE_MAX + 1;
- ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
- ar_pci->num_msi_intrs);
- if (ret > 0)
- return 0;
-
- /* fall-through */
- }
-
/* Try MSI */
if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
- ar_pci->num_msi_intrs = 1;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
ret = pci_enable_msi(ar_pci->pdev);
if (ret == 0)
return 0;
@@ -2801,7 +2719,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
* This write might get lost if target has NOT written BAR.
* For now, fix the race by repeating the write in below
* synchronization checking. */
- ar_pci->num_msi_intrs = 0;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
@@ -2819,8 +2737,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
- switch (ar_pci->num_msi_intrs) {
- case 0:
+ switch (ar_pci->oper_irq_mode) {
+ case ATH10K_PCI_IRQ_LEGACY:
ath10k_pci_deinit_irq_legacy(ar);
break;
default:
@@ -2858,7 +2776,7 @@ int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (val & FW_IND_INITIALIZED)
break;
- if (ar_pci->num_msi_intrs == 0)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
/* Fix potential race by repeating CORE_BASE writes */
ath10k_pci_enable_legacy_irq(ar);
@@ -3136,8 +3054,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_sleep;
}
- ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
- ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
+ ath10k_info(ar, "pci irq %s oper_irq_mode %d irq_mode %d reset_mode %d\n",
+ ath10k_pci_get_irq_method(ar), ar_pci->oper_irq_mode,
ath10k_pci_irq_mode, ath10k_pci_reset_mode);
ret = ath10k_pci_request_irq(ar);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 249c73a69..959dc321b 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -148,9 +148,6 @@ struct ath10k_pci_pipe {
/* protects compl_free and num_send_allowed */
spinlock_t pipe_lock;
-
- struct ath10k_pci *ar_pci;
- struct tasklet_struct intr;
};
struct ath10k_pci_supp_chip {
@@ -164,6 +161,12 @@ struct ath10k_bus_ops {
int (*get_num_banks)(struct ath10k *ar);
};
+enum ath10k_pci_irq_mode {
+ ATH10K_PCI_IRQ_AUTO = 0,
+ ATH10K_PCI_IRQ_LEGACY = 1,
+ ATH10K_PCI_IRQ_MSI = 2,
+};
+
struct ath10k_pci {
struct pci_dev *pdev;
struct device *dev;
@@ -171,14 +174,10 @@ struct ath10k_pci {
void __iomem *mem;
size_t mem_len;
- /*
- * Number of MSI interrupts granted, 0 --> using legacy PCI line
- * interrupts.
- */
- int num_msi_intrs;
+ /* Operating interrupt mode */
+ enum ath10k_pci_irq_mode oper_irq_mode;
struct tasklet_struct intr_tq;
- struct tasklet_struct msi_fw_err;
struct ath10k_pci_pipe pipe_info[CE_COUNT_MAX];
diff --git a/drivers/net/wireless/ath/ath10k/swap.c b/drivers/net/wireless/ath/ath10k/swap.c
index 3ca3fae40..0c5f5863d 100644
--- a/drivers/net/wireless/ath/ath10k/swap.c
+++ b/drivers/net/wireless/ath/ath10k/swap.c
@@ -134,27 +134,17 @@ ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
return seg_info;
}
-int ath10k_swap_code_seg_configure(struct ath10k *ar,
- enum ath10k_swap_code_seg_bin_type type)
+int ath10k_swap_code_seg_configure(struct ath10k *ar)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info = NULL;
- switch (type) {
- case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW:
- if (!ar->swap.firmware_swap_code_seg_info)
- return 0;
-
- ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
- seg_info = ar->swap.firmware_swap_code_seg_info;
- break;
- default:
- case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP:
- case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF:
- ath10k_warn(ar, "ignoring unknown code swap binary type %d\n",
- type);
+ if (!ar->swap.firmware_swap_code_seg_info)
return 0;
- }
+
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
+
+ seg_info = ar->swap.firmware_swap_code_seg_info;
ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
&seg_info->seg_hw_info,
@@ -171,8 +161,13 @@ int ath10k_swap_code_seg_configure(struct ath10k *ar,
void ath10k_swap_code_seg_release(struct ath10k *ar)
{
ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info);
- ar->swap.firmware_codeswap_data = NULL;
- ar->swap.firmware_codeswap_len = 0;
+
+ /* FIXME: these two assignments look to be in the wrong place! Shouldn't
+ * they be in ath10k_core_free_firmware_files() like the rest?
+ */
+ ar->normal_mode_fw.fw_file.codeswap_data = NULL;
+ ar->normal_mode_fw.fw_file.codeswap_len = 0;
+
ar->swap.firmware_swap_code_seg_info = NULL;
}
@@ -180,20 +175,23 @@ int ath10k_swap_code_seg_init(struct ath10k *ar)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info;
+ const void *codeswap_data;
+ size_t codeswap_len;
+
+ codeswap_data = ar->normal_mode_fw.fw_file.codeswap_data;
+ codeswap_len = ar->normal_mode_fw.fw_file.codeswap_len;
- if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data)
+ if (!codeswap_len || !codeswap_data)
return 0;
- seg_info = ath10k_swap_code_seg_alloc(ar,
- ar->swap.firmware_codeswap_len);
+ seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len);
if (!seg_info) {
ath10k_err(ar, "failed to allocate fw code swap segment\n");
return -ENOMEM;
}
ret = ath10k_swap_code_seg_fill(ar, seg_info,
- ar->swap.firmware_codeswap_data,
- ar->swap.firmware_codeswap_len);
+ codeswap_data, codeswap_len);
if (ret) {
ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/swap.h b/drivers/net/wireless/ath/ath10k/swap.h
index 5c89952dd..36991c7b0 100644
--- a/drivers/net/wireless/ath/ath10k/swap.h
+++ b/drivers/net/wireless/ath/ath10k/swap.h
@@ -39,12 +39,6 @@ union ath10k_swap_code_seg_item {
struct ath10k_swap_code_seg_tail tail;
} __packed;
-enum ath10k_swap_code_seg_bin_type {
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP,
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW,
- ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF,
-};
-
struct ath10k_swap_code_seg_hw_info {
/* Swap binary image size */
__le32 swap_size;
@@ -64,8 +58,7 @@ struct ath10k_swap_code_seg_info {
dma_addr_t paddr[ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED];
};
-int ath10k_swap_code_seg_configure(struct ath10k *ar,
- enum ath10k_swap_code_seg_bin_type type);
+int ath10k_swap_code_seg_configure(struct ath10k *ar);
void ath10k_swap_code_seg_release(struct ath10k *ar);
int ath10k_swap_code_seg_init(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index 361f143b0..8e24099fa 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -438,7 +438,7 @@ Fw Mode/SubMode Mask
((HOST_INTEREST->hi_pwr_save_flags & HI_PWR_SAVE_LPL_ENABLED))
#define HI_DEV_LPL_TYPE_GET(_devix) \
(HOST_INTEREST->hi_pwr_save_flags & ((HI_PWR_SAVE_LPL_DEV_MASK) << \
- (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix)*2)))
+ (HI_PWR_SAVE_LPL_DEV0_LSB + (_devix) * 2)))
#define HOST_INTEREST_SMPS_IS_ALLOWED() \
((HOST_INTEREST->hi_smps_options & HI_SMPS_ALLOW_MASK))
diff --git a/drivers/net/wireless/ath/ath10k/testmode.c b/drivers/net/wireless/ath/ath10k/testmode.c
index 028e70bba..332393bf0 100644
--- a/drivers/net/wireless/ath/ath10k/testmode.c
+++ b/drivers/net/wireless/ath/ath10k/testmode.c
@@ -139,127 +139,8 @@ static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
return cfg80211_testmode_reply(skb);
}
-static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar)
-{
- size_t len, magic_len, ie_len;
- struct ath10k_fw_ie *hdr;
- char filename[100];
- __le32 *version;
- const u8 *data;
- int ie_id, ret;
-
- snprintf(filename, sizeof(filename), "%s/%s",
- ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE);
-
- /* load utf firmware image */
- ret = reject_firmware(&ar->testmode.utf, filename, ar->dev);
- if (ret) {
- ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
- filename, ret);
- return ret;
- }
-
- data = ar->testmode.utf->data;
- len = ar->testmode.utf->size;
-
- /* FIXME: call release_firmware() in error cases */
-
- /* magic also includes the null byte, check that as well */
- magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
-
- if (len < magic_len) {
- ath10k_err(ar, "utf firmware file is too small to contain magic\n");
- ret = -EINVAL;
- goto err;
- }
-
- if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
- ath10k_err(ar, "invalid firmware magic\n");
- ret = -EINVAL;
- goto err;
- }
-
- /* jump over the padding */
- magic_len = ALIGN(magic_len, 4);
-
- len -= magic_len;
- data += magic_len;
-
- /* loop elements */
- while (len > sizeof(struct ath10k_fw_ie)) {
- hdr = (struct ath10k_fw_ie *)data;
-
- ie_id = le32_to_cpu(hdr->id);
- ie_len = le32_to_cpu(hdr->len);
-
- len -= sizeof(*hdr);
- data += sizeof(*hdr);
-
- if (len < ie_len) {
- ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
- ie_id, len, ie_len);
- ret = -EINVAL;
- goto err;
- }
-
- switch (ie_id) {
- case ATH10K_FW_IE_FW_VERSION:
- if (ie_len > sizeof(ar->testmode.utf_version) - 1)
- break;
-
- memcpy(ar->testmode.utf_version, data, ie_len);
- ar->testmode.utf_version[ie_len] = '\0';
-
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode found fw utf version %s\n",
- ar->testmode.utf_version);
- break;
- case ATH10K_FW_IE_TIMESTAMP:
- /* ignore timestamp, but don't warn about it either */
- break;
- case ATH10K_FW_IE_FW_IMAGE:
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
- "testmode found fw image ie (%zd B)\n",
- ie_len);
-
- ar->testmode.utf_firmware_data = data;
- ar->testmode.utf_firmware_len = ie_len;
- break;
- case ATH10K_FW_IE_WMI_OP_VERSION:
- if (ie_len != sizeof(u32))
- break;
- version = (__le32 *)data;
- ar->testmode.op_version = le32_to_cpup(version);
- ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n",
- ar->testmode.op_version);
- break;
- default:
- ath10k_warn(ar, "Unknown testmode FW IE: %u\n",
- le32_to_cpu(hdr->id));
- break;
- }
- /* jump over the padding */
- ie_len = ALIGN(ie_len, 4);
-
- len -= ie_len;
- data += ie_len;
- }
-
- if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) {
- ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n");
- ret = -EINVAL;
- goto err;
- }
-
- return 0;
-
-err:
- release_firmware(ar->testmode.utf);
-
- return ret;
-}
-
-static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
+static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar,
+ struct ath10k_fw_file *fw_file)
{
char filename[100];
int ret;
@@ -268,7 +149,7 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
/* load utf firmware image */
- ret = reject_firmware(&ar->testmode.utf, filename, ar->dev);
+ ret = reject_firmware(&fw_file->firmware, filename, ar->dev);
if (ret) {
ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
filename, ret);
@@ -281,24 +162,27 @@ static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
* correct WMI interface.
*/
- ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
- ar->testmode.utf_firmware_data = ar->testmode.utf->data;
- ar->testmode.utf_firmware_len = ar->testmode.utf->size;
+ fw_file->wmi_op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
+ fw_file->htt_op_version = ATH10K_FW_HTT_OP_VERSION_10_1;
+ fw_file->firmware_data = fw_file->firmware->data;
+ fw_file->firmware_len = fw_file->firmware->size;
return 0;
}
static int ath10k_tm_fetch_firmware(struct ath10k *ar)
{
+ struct ath10k_fw_components *utf_mode_fw;
int ret;
- ret = ath10k_tm_fetch_utf_firmware_api_2(ar);
+ ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_UTF_API2_FILE,
+ &ar->testmode.utf_mode_fw.fw_file);
if (ret == 0) {
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
- return 0;
+ goto out;
}
- ret = ath10k_tm_fetch_utf_firmware_api_1(ar);
+ ret = ath10k_tm_fetch_utf_firmware_api_1(ar, &ar->testmode.utf_mode_fw.fw_file);
if (ret) {
ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret);
return ret;
@@ -306,6 +190,21 @@ static int ath10k_tm_fetch_firmware(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1");
+out:
+ utf_mode_fw = &ar->testmode.utf_mode_fw;
+
+ /* Use the same board data file as the normal firmware uses (but
+ * it's still "owned" by normal_mode_fw, so we shouldn't free it).
+ */
+ utf_mode_fw->board_data = ar->normal_mode_fw.board_data;
+ utf_mode_fw->board_len = ar->normal_mode_fw.board_len;
+
+ if (!utf_mode_fw->fw_file.otp_data) {
+ ath10k_info(ar, "/*(DEBLOBBED)*/ didn't contain otp binary, taking it from the normal mode firmware");
+ utf_mode_fw->fw_file.otp_data = ar->normal_mode_fw.fw_file.otp_data;
+ utf_mode_fw->fw_file.otp_len = ar->normal_mode_fw.fw_file.otp_len;
+ }
+
return 0;
}
@@ -329,7 +228,7 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err;
}
- if (WARN_ON(ar->testmode.utf != NULL)) {
+ if (WARN_ON(ar->testmode.utf_mode_fw.fw_file.firmware != NULL)) {
/* utf image is already downloaded, it shouldn't be */
ret = -EEXIST;
goto err;
@@ -344,27 +243,19 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = true;
spin_unlock_bh(&ar->data_lock);
- BUILD_BUG_ON(sizeof(ar->fw_features) !=
- sizeof(ar->testmode.orig_fw_features));
-
- memcpy(ar->testmode.orig_fw_features, ar->fw_features,
- sizeof(ar->fw_features));
- ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
- memset(ar->fw_features, 0, sizeof(ar->fw_features));
-
- ar->wmi.op_version = ar->testmode.op_version;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
- ar->wmi.op_version);
+ ar->testmode.utf_mode_fw.fw_file.wmi_op_version);
ret = ath10k_hif_power_up(ar);
if (ret) {
ath10k_err(ar, "failed to power up hif (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
- goto err_fw_features;
+ goto err_release_utf_mode_fw;
}
- ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF);
+ ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_UTF,
+ &ar->testmode.utf_mode_fw);
if (ret) {
ath10k_err(ar, "failed to start core (testmode): %d\n", ret);
ar->state = ATH10K_STATE_OFF;
@@ -373,8 +264,8 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
ar->state = ATH10K_STATE_UTF;
- if (strlen(ar->testmode.utf_version) > 0)
- ver = ar->testmode.utf_version;
+ if (strlen(ar->testmode.utf_mode_fw.fw_file.fw_version) > 0)
+ ver = ar->testmode.utf_mode_fw.fw_file.fw_version;
else
ver = "API 1";
@@ -387,14 +278,9 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
err_power_down:
ath10k_hif_power_down(ar);
-err_fw_features:
- /* return the original firmware features */
- memcpy(ar->fw_features, ar->testmode.orig_fw_features,
- sizeof(ar->fw_features));
- ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
-
- release_firmware(ar->testmode.utf);
- ar->testmode.utf = NULL;
+err_release_utf_mode_fw:
+ release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+ ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
err:
mutex_unlock(&ar->conf_mutex);
@@ -415,13 +301,8 @@ static void __ath10k_tm_cmd_utf_stop(struct ath10k *ar)
spin_unlock_bh(&ar->data_lock);
- /* return the original firmware features */
- memcpy(ar->fw_features, ar->testmode.orig_fw_features,
- sizeof(ar->fw_features));
- ar->wmi.op_version = ar->testmode.orig_wmi_op_version;
-
- release_firmware(ar->testmode.utf);
- ar->testmode.utf = NULL;
+ release_firmware(ar->testmode.utf_mode_fw.fw_file.firmware);
+ ar->testmode.utf_mode_fw.fw_file.firmware = NULL;
ar->state = ATH10K_STATE_OFF;
}
diff --git a/drivers/net/wireless/ath/ath10k/thermal.h b/drivers/net/wireless/ath/ath10k/thermal.h
index c9223e9e9..3abb97f63 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.h
+++ b/drivers/net/wireless/ath/ath10k/thermal.h
@@ -20,7 +20,7 @@
#define ATH10K_QUIET_PERIOD_MIN 25
#define ATH10K_QUIET_START_OFFSET 10
#define ATH10K_HWMON_NAME_LEN 15
-#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5*HZ)
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
#define ATH10K_THERMAL_THROTTLE_MAX 100
struct ath10k_thermal {
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index fbfb608e4..576e7c42e 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -49,25 +49,25 @@ out:
spin_unlock_bh(&ar->data_lock);
}
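+/* Returns 0 once the completion has been processed, -EINVAL for an
+ * out-of-range msdu id and -ENOENT for an unknown one, so callers can
+ * tell a consumed completion from a bogus one.
+ */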
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done)
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done)
{
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_tx_info *info;
+ struct ieee80211_txq *txq;
struct ath10k_skb_cb *skb_cb;
+ struct ath10k_txq *artxq;
struct sk_buff *msdu;
- bool limit_mgmt_desc = false;
ath10k_dbg(ar, ATH10K_DBG_HTT,
- "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
- tx_done->msdu_id, !!tx_done->discard,
- !!tx_done->no_ack, !!tx_done->success);
+ "htt tx completion msdu_id %u status %d\n",
+ tx_done->msdu_id, tx_done->status);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
- return;
+ return -EINVAL;
}
spin_lock_bh(&htt->tx_lock);
@@ -76,17 +76,18 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
tx_done->msdu_id);
spin_unlock_bh(&htt->tx_lock);
- return;
+ return -ENOENT;
}
skb_cb = ATH10K_SKB_CB(msdu);
+ txq = skb_cb->txq;
- if (unlikely(skb_cb->flags & ATH10K_SKB_F_MGMT) &&
- ar->hw_params.max_probe_resp_desc_thres)
- limit_mgmt_desc = true;
+ if (txq) {
+ artxq = (void *)txq->drv_priv;
+ artxq->num_fw_queued--;
+ }
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
- __ath10k_htt_tx_dec_pending(htt, limit_mgmt_desc);
+ ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0)
wake_up(&htt->empty_tx_wq);
spin_unlock_bh(&htt->tx_lock);
@@ -99,22 +100,24 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
memset(&info->status, 0, sizeof(info->status));
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
- if (tx_done->discard) {
+ if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
ieee80211_free_txskb(htt->ar->hw, msdu);
- return;
+ return 0;
}
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- if (tx_done->no_ack)
+ if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
info->flags &= ~IEEE80211_TX_STAT_ACK;
- if (tx_done->success && (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
ieee80211_tx_status(htt->ar->hw, msdu);
/* we do not own the msdu anymore */
+ return 0;
}
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
@@ -127,7 +130,7 @@ struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
list_for_each_entry(peer, &ar->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
- if (memcmp(peer->addr, addr, ETH_ALEN))
+ if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
@@ -163,7 +166,7 @@ static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
(mapped == expect_mapped ||
test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
- }), 3*HZ);
+ }), 3 * HZ);
if (time_left == 0)
return -ETIMEDOUT;
@@ -187,6 +190,13 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
+ if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+ ath10k_warn(ar,
+ "received htt peer map event with idx out of bounds: %hu\n",
+ ev->peer_id);
+ return;
+ }
+
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
if (!peer) {
@@ -203,6 +213,7 @@ void ath10k_peer_map_event(struct ath10k_htt *htt,
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ev->vdev_id, ev->addr, ev->peer_id);
+ ar->peer_map[ev->peer_id] = peer;
set_bit(ev->peer_id, peer->peer_ids);
exit:
spin_unlock_bh(&ar->data_lock);
@@ -214,6 +225,13 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
+ if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
+ ath10k_warn(ar,
+ "received htt peer unmap event with idx out of bounds: %hu\n",
+ ev->peer_id);
+ return;
+ }
+
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
@@ -225,6 +243,7 @@ void ath10k_peer_unmap_event(struct ath10k_htt *htt,
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, ev->peer_id);
+ ar->peer_map[ev->peer_id] = NULL;
clear_bit(ev->peer_id, peer->peer_ids);
if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
diff --git a/drivers/net/wireless/ath/ath10k/txrx.h b/drivers/net/wireless/ath/ath10k/txrx.h
index a90e09f5c..e7ea1ae1c 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.h
+++ b/drivers/net/wireless/ath/ath10k/txrx.h
@@ -19,8 +19,8 @@
#include "htt.h"
-void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
- const struct htt_tx_done *tx_done);
+int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
+ const struct htt_tx_done *tx_done);
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 32ab34edc..64ebd304f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -186,8 +186,14 @@ struct wmi_ops {
u8 enable,
u32 detect_level,
u32 detect_margin);
+ struct sk_buff *(*ext_resource_config)(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap);
int (*get_vdev_subtype)(struct ath10k *ar,
enum wmi_vdev_subtype subtype);
+ struct sk_buff *(*gen_pdev_bss_chan_info_req)
+ (struct ath10k *ar,
+ enum wmi_bss_survey_req_type type);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1330,6 +1336,26 @@ ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
}
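+/* Send an ext resource config command (host platform type plus extra fw
+ * feature bits). Returns -EOPNOTSUPP on WMI ABIs that don't implement it;
+ * in this patch only the 10.4 command map wires up ext_resource_cfg_cmdid.
+ */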
static inline int
+ath10k_wmi_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->ext_resource_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->ext_resource_config(ar, type,
+ fw_feature_bitmap);
+
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ ar->wmi.cmd->ext_resource_cfg_cmdid);
+}
+
+static inline int
ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
{
if (!ar->wmi.ops->get_vdev_subtype)
@@ -1338,4 +1364,22 @@ ath10k_wmi_get_vdev_subtype(struct ath10k *ar, enum wmi_vdev_subtype subtype)
return ar->wmi.ops->get_vdev_subtype(ar, subtype);
}
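+/* Request a BSS channel info survey from firmware (mac.c uses the
+ * read-and-clear variant). Returns -EOPNOTSUPP on WMI ABIs without the
+ * command.
+ */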
+static inline int
+ath10k_wmi_pdev_bss_chan_info_request(struct ath10k *ar,
+ enum wmi_bss_survey_req_type type)
+{
+ struct ath10k_wmi *wmi = &ar->wmi;
+ struct sk_buff *skb;
+
+ if (!wmi->ops->gen_pdev_bss_chan_info_req)
+ return -EOPNOTSUPP;
+
+ skb = wmi->ops->gen_pdev_bss_chan_info_req(ar, type);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb,
+ wmi->cmd->pdev_bss_chan_info_request_cmdid);
+}
+
#endif
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index 108593202..e09337ee7 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -3409,6 +3409,7 @@ static struct wmi_vdev_param_map wmi_tlv_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static const struct wmi_ops wmi_tlv_ops = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
index dd6785905..b8aa60005 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
@@ -968,8 +968,8 @@ enum wmi_tlv_service {
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
((svc_id) < (len) && \
- __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
- BIT((svc_id)%(sizeof(u32))))
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+ BIT((svc_id) % (sizeof(u32))))
#define SVCMAP(x, y, len) \
do { \
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 70261387d..2c300329e 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -521,7 +521,8 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
.vdev_filter_neighbor_rx_packets_cmdid = WMI_CMD_UNSUPPORTED,
.mu_cal_start_cmdid = WMI_CMD_UNSUPPORTED,
.set_cca_params_cmdid = WMI_CMD_UNSUPPORTED,
- .pdev_bss_chan_info_request_cmdid = WMI_CMD_UNSUPPORTED,
+ .pdev_bss_chan_info_request_cmdid =
+ WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
};
/* 10.4 WMI cmd track */
@@ -705,6 +706,7 @@ static struct wmi_cmd_map wmi_10_4_cmd_map = {
.set_cca_params_cmdid = WMI_10_4_SET_CCA_PARAMS_CMDID,
.pdev_bss_chan_info_request_cmdid =
WMI_10_4_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
+ .ext_resource_cfg_cmdid = WMI_10_4_EXT_RESOURCE_CFG_CMDID,
};
/* MAIN WMI VDEV param map */
@@ -780,6 +782,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
@@ -855,6 +858,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_VDEV_PARAM_UNSUPPORTED,
};
static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
@@ -929,6 +933,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = {
.meru_vc = WMI_VDEV_PARAM_UNSUPPORTED,
.rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED,
+ .set_tsf = WMI_10X_VDEV_PARAM_TSF_INCREMENT,
};
static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
@@ -1004,6 +1009,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = {
.meru_vc = WMI_10_4_VDEV_PARAM_MERU_VC,
.rx_decap_type = WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
.bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ .set_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
@@ -1628,6 +1634,7 @@ void ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
ch->max_power = arg->max_power;
ch->reg_power = arg->max_reg_power;
ch->antenna_max = arg->max_antenna_gain;
+ ch->max_tx_power = arg->max_power;
/* mode & flags share storage */
ch->mode = arg->mode;
@@ -1803,7 +1810,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
ret = -ESHUTDOWN;
(ret != -EAGAIN);
- }), 3*HZ);
+ }), 3 * HZ);
if (ret)
dev_kfree_skb_any(skb);
@@ -2099,34 +2106,6 @@ int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
-static inline enum ieee80211_band phy_mode_to_band(u32 phy_mode)
-{
- enum ieee80211_band band;
-
- switch (phy_mode) {
- case MODE_11A:
- case MODE_11NA_HT20:
- case MODE_11NA_HT40:
- case MODE_11AC_VHT20:
- case MODE_11AC_VHT40:
- case MODE_11AC_VHT80:
- band = IEEE80211_BAND_5GHZ;
- break;
- case MODE_11G:
- case MODE_11B:
- case MODE_11GONLY:
- case MODE_11NG_HT20:
- case MODE_11NG_HT40:
- case MODE_11AC_VHT20_2G:
- case MODE_11AC_VHT40_2G:
- case MODE_11AC_VHT80_2G:
- default:
- band = IEEE80211_BAND_2GHZ;
- }
-
- return band;
-}
-
/* If keys are configured, HW decrypts all frames
* with protected bit set. Mark such frames as decrypted.
*/
@@ -2167,10 +2146,13 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
struct wmi_mgmt_rx_event_v1 *ev_v1;
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
+ struct wmi_mgmt_rx_ext_info *ext_info;
size_t pull_len;
u32 msdu_len;
+ u32 len;
- if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features)) {
+ if (test_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX,
+ ar->running_fw->fw_file.fw_features)) {
ev_v2 = (struct wmi_mgmt_rx_event_v2 *)skb->data;
ev_hdr = &ev_v2->hdr.v1;
pull_len = sizeof(*ev_v2);
@@ -2195,6 +2177,12 @@ static int ath10k_wmi_op_pull_mgmt_rx_ev(struct ath10k *ar, struct sk_buff *skb,
if (skb->len < msdu_len)
return -EPROTO;
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
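+
+ /* The optional ext_info trailer sits right behind the management
+ * payload, which is padded out to a 4-byte boundary, hence the
+ * ALIGN(buf_len, 4) offset before copying it out.
+ */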
/* the WMI buffer might've ended up being padded to 4 bytes due to HTC
* trailer with credit update. Trim the excess garbage.
*/
@@ -2211,6 +2199,8 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
struct wmi_10_4_mgmt_rx_hdr *ev_hdr;
size_t pull_len;
u32 msdu_len;
+ struct wmi_mgmt_rx_ext_info *ext_info;
+ u32 len;
ev = (struct wmi_10_4_mgmt_rx_event *)skb->data;
ev_hdr = &ev->hdr;
@@ -2231,6 +2221,13 @@ static int ath10k_wmi_10_4_op_pull_mgmt_rx_ev(struct ath10k *ar,
if (skb->len < msdu_len)
return -EPROTO;
+ if (le32_to_cpu(arg->status) & WMI_RX_STATUS_EXT_INFO) {
+ len = ALIGN(le32_to_cpu(arg->buf_len), 4);
+ ext_info = (struct wmi_mgmt_rx_ext_info *)(skb->data + len);
+ memcpy(&arg->ext_info, ext_info,
+ sizeof(struct wmi_mgmt_rx_ext_info));
+ }
+
/* Make sure bytes added for padding are removed. */
skb_trim(skb, msdu_len);
@@ -2281,14 +2278,19 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (rx_status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
+ if (rx_status & WMI_RX_STATUS_EXT_INFO) {
+ status->mactime =
+ __le64_to_cpu(arg.ext_info.rx_mac_timestamp);
+ status->flag |= RX_FLAG_MACTIME_END;
+ }
/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
* MODE_11B. This means phy_mode is not a reliable source for the band
* of mgmt rx.
*/
if (channel >= 1 && channel <= 14) {
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
} else if (channel >= 36 && channel <= 165) {
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;
} else {
/* Shouldn't happen unless list of advertised channels to
* mac80211 has been changed.
@@ -2298,7 +2300,7 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
- if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
+ if (phy_mode == MODE_11B && status->band == NL80211_BAND_5GHZ)
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
sband = &ar->mac.sbands[status->band];
@@ -2310,6 +2312,12 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
+ /* Firmware is guaranteed to report all essential management frames via
+ * WMI, while it can deliver some extra via HTT. Since there can be
+ * duplicates, split the reporting w.r.t. monitor/sniffing.
+ */
+ status->flag |= RX_FLAG_SKIP_MONITOR;
+
ath10k_wmi_handle_wep_reauth(ar, skb, status);
/* FW delivers WEP Shared Auth frame with Protected Bit set and
@@ -2351,7 +2359,7 @@ static int freq_to_idx(struct ath10k *ar, int freq)
struct ieee80211_supported_band *sband;
int band, ch, idx = 0;
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
sband = ar->hw->wiphy->bands[band];
if (!sband)
continue;
@@ -2612,6 +2620,16 @@ void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}
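+
+/* Shared helper for the 10.4 stats parser below: it copies the fields
+ * common to the base and extended peer-stats layouts, so both record
+ * types can be handled through wmi_10_4_peer_extd_stats::common.
+ */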
+static void
+ath10k_wmi_10_4_pull_peer_stats(const struct wmi_10_4_peer_stats *src,
+ struct ath10k_fw_stats_peer *dst)
+{
+ ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
+ dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
+ dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
+ dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+}
+
static int ath10k_wmi_main_op_pull_fw_stats(struct ath10k *ar,
struct sk_buff *skb,
struct ath10k_fw_stats *stats)
@@ -2865,11 +2883,8 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
const struct wmi_10_2_4_ext_peer_stats *src;
struct ath10k_fw_stats_peer *dst;
int stats_len;
- bool ext_peer_stats_support;
- ext_peer_stats_support = test_bit(WMI_SERVICE_PEER_STATS,
- ar->wmi.svc_map);
- if (ext_peer_stats_support)
+ if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
stats_len = sizeof(struct wmi_10_2_4_ext_peer_stats);
else
stats_len = sizeof(struct wmi_10_2_4_peer_stats);
@@ -2886,7 +2901,7 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
dst->peer_rx_rate = __le32_to_cpu(src->common.peer_rx_rate);
- if (ext_peer_stats_support)
+ if (ath10k_peer_stats_enabled(ar))
dst->rx_duration = __le32_to_cpu(src->rx_duration);
/* FIXME: expose 10.2 specific values */
@@ -2905,6 +2920,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
u32 num_pdev_ext_stats;
u32 num_vdev_stats;
u32 num_peer_stats;
+ u32 stats_id;
int i;
if (!skb_pull(skb, sizeof(*ev)))
@@ -2914,6 +2930,7 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
+ stats_id = __le32_to_cpu(ev->stats_id);
for (i = 0; i < num_pdev_stats; i++) {
const struct wmi_10_4_pdev_stats *src;
@@ -2953,22 +2970,28 @@ static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
/* fw doesn't implement vdev stats */
for (i = 0; i < num_peer_stats; i++) {
- const struct wmi_10_4_peer_stats *src;
+ const struct wmi_10_4_peer_extd_stats *src;
struct ath10k_fw_stats_peer *dst;
+ int stats_len;
+ bool extd_peer_stats = !!(stats_id & WMI_10_4_STAT_PEER_EXTD);
+
+ if (extd_peer_stats)
+ stats_len = sizeof(struct wmi_10_4_peer_extd_stats);
+ else
+ stats_len = sizeof(struct wmi_10_4_peer_stats);
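+
+ /* Pick the record stride to match what the firmware actually sent;
+ * pulling the wrong size here would leave skb->data mid-record and
+ * misparse every following peer entry.
+ */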
src = (void *)skb->data;
- if (!skb_pull(skb, sizeof(*src)))
+ if (!skb_pull(skb, stats_len))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
- ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
- dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
- dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
- dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
+ ath10k_wmi_10_4_pull_peer_stats(&src->common, dst);
/* FIXME: expose 10.4 specific values */
+ if (extd_peer_stats)
+ dst->rx_duration = __le32_to_cpu(src->rx_duration);
list_add_tail(&dst->list, &stats->peers);
}
@@ -4584,10 +4607,6 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
arg.service_map, arg.service_map_len);
- /* only manually set fw features when not using FW IE format */
- if (ar->fw_api == 1 && ar->fw_version_build > 636)
- set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
-
if (ar->num_rf_chains > ar->max_spatial_stream) {
ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, ar->max_spatial_stream);
@@ -4617,10 +4636,16 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
}
if (test_bit(WMI_SERVICE_PEER_CACHING, ar->wmi.svc_map)) {
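+ /* Peer caching means the target keeps only a subset of known peers
+ * active at once; firmware built with peer flow control evidently
+ * reserves a different qcache active-peer budget, hence the split
+ * below.
+ */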
+ if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
+ ar->running_fw->fw_file.fw_features))
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS_PFC +
+ ar->max_num_vdevs;
+ else
+ ar->num_active_peers = TARGET_10_4_QCACHE_ACTIVE_PEERS +
+ ar->max_num_vdevs;
+
ar->max_num_peers = TARGET_10_4_NUM_QCACHE_PEERS_MAX +
ar->max_num_vdevs;
- ar->num_active_peers = ar->hw_params.qcache_active_peers +
- ar->max_num_vdevs;
ar->num_tids = ar->num_active_peers * 2;
ar->max_num_stations = TARGET_10_4_NUM_QCACHE_PEERS_MAX;
}
@@ -4769,6 +4794,58 @@ static int ath10k_wmi_event_temperature(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
+static int ath10k_wmi_event_pdev_bss_chan_info(struct ath10k *ar,
+ struct sk_buff *skb)
+{
+ struct wmi_pdev_bss_chan_info_event *ev;
+ struct survey_info *survey;
+ u64 busy, total, tx, rx, rx_bss;
+ u32 freq, noise_floor;
+ u32 cc_freq_hz = ar->hw_params.channel_counters_freq_hz;
+ int idx;
+
+ ev = (struct wmi_pdev_bss_chan_info_event *)skb->data;
+ if (WARN_ON(skb->len < sizeof(*ev)))
+ return -EPROTO;
+
+ freq = __le32_to_cpu(ev->freq);
+ noise_floor = __le32_to_cpu(ev->noise_floor);
+ busy = __le64_to_cpu(ev->cycle_busy);
+ total = __le64_to_cpu(ev->cycle_total);
+ tx = __le64_to_cpu(ev->cycle_tx);
+ rx = __le64_to_cpu(ev->cycle_rx);
+ rx_bss = __le64_to_cpu(ev->cycle_rx_bss);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi event pdev bss chan info:\n freq: %d noise: %d cycle: busy %llu total %llu tx %llu rx %llu rx_bss %llu\n",
+ freq, noise_floor, busy, total, tx, rx, rx_bss);
+
+ spin_lock_bh(&ar->data_lock);
+ idx = freq_to_idx(ar, freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn(ar, "bss chan info: invalid frequency %d (idx %d out of bounds)\n",
+ freq, idx);
+ goto exit;
+ }
+
+ survey = &ar->survey[idx];
+
+ survey->noise = noise_floor;
+ survey->time = div_u64(total, cc_freq_hz);
+ survey->time_busy = div_u64(busy, cc_freq_hz);
+ survey->time_rx = div_u64(rx_bss, cc_freq_hz);
+ survey->time_tx = div_u64(tx, cc_freq_hz);
+ survey->filled |= (SURVEY_INFO_NOISE_DBM |
+ SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX);
+exit:
+ spin_unlock_bh(&ar->data_lock);
+ complete(&ar->bss_survey_done);
+ return 0;
+}
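+
+/* The survey entries above come from free-running cycle counters:
+ * div_u64(counter, cc_freq_hz) turns raw cycles into time in the unit
+ * implied by channel_counters_freq_hz. Illustratively, a counter
+ * clocked at 88 MHz divided by a cc_freq_hz of 88000 comes out in
+ * milliseconds, which is the unit cfg80211 survey times use.
+ */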
+
static void ath10k_wmi_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -5112,6 +5189,9 @@ static void ath10k_wmi_10_2_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_2_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_event_temperature(ar, skb);
break;
+ case WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+ break;
case WMI_10_2_RTT_KEEPALIVE_EVENTID:
case WMI_10_2_GPIO_INPUT_EVENTID:
case WMI_10_2_PEER_RATECODE_LIST_EVENTID:
@@ -5189,6 +5269,7 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_stopped(ar, skb);
break;
case WMI_10_4_WOW_WAKEUP_HOST_EVENTID:
+ case WMI_10_4_PEER_RATECODE_LIST_EVENTID:
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
@@ -5198,6 +5279,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
case WMI_10_4_PDEV_TEMPERATURE_EVENTID:
ath10k_wmi_event_temperature(ar, skb);
break;
+ case WMI_10_4_PDEV_BSS_CHAN_INFO_EVENTID:
+ ath10k_wmi_event_pdev_bss_chan_info(ar, skb);
+ break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
@@ -5517,7 +5601,8 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map)) {
+
+ if (ath10k_peer_stats_enabled(ar)) {
config.num_peers = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_PEERS);
config.num_tids = __cpu_to_le32(TARGET_10X_TX_STATS_NUM_TIDS);
} else {
@@ -5579,9 +5664,12 @@ static struct sk_buff *ath10k_wmi_10_2_op_gen_init(struct ath10k *ar)
test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map))
features |= WMI_10_2_COEX_GPIO;
- if (test_bit(WMI_SERVICE_PEER_STATS, ar->wmi.svc_map))
+ if (ath10k_peer_stats_enabled(ar))
features |= WMI_10_2_PEER_STATS;
+ if (test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map))
+ features |= WMI_10_2_BSS_CHAN_INFO;
+
cmd->resource_config.feature_mask = __cpu_to_le32(features);
memcpy(&cmd->resource_config.common, &config, sizeof(config));
@@ -5800,9 +5888,8 @@ ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
for (i = 0; i < arg->n_bssids; i++)
- memcpy(&bssids->bssid_list[i],
- arg->bssids[i].bssid,
- ETH_ALEN);
+ ether_addr_copy(bssids->bssid_list[i].addr,
+ arg->bssids[i].bssid);
ptr += sizeof(*bssids);
ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
@@ -6613,6 +6700,26 @@ ath10k_wmi_10_2_op_gen_pdev_get_temperature(struct ath10k *ar)
return skb;
}
+static struct sk_buff *
+ath10k_wmi_10_2_op_gen_pdev_bss_chan_info(struct ath10k *ar,
+ enum wmi_bss_survey_req_type type)
+{
+ struct wmi_pdev_chan_info_req_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_pdev_chan_info_req_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi pdev bss info request type %d\n", type);
+
+ return skb;
+}
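+
+/* Like the other gen_* ops, this only builds the command buffer; the
+ * shared wrapper in wmi-ops.h sends it with the per-version
+ * pdev_bss_chan_info_request_cmdid.
+ */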
+
/* This function assumes the beacon is already DMA mapped */
static struct sk_buff *
ath10k_wmi_op_gen_beacon_dma(struct ath10k *ar, u32 vdev_id, const void *bcn,
@@ -7484,6 +7591,28 @@ static int ath10k_wmi_10_4_op_get_vdev_subtype(struct ath10k *ar,
return -ENOTSUPP;
}
+static struct sk_buff *
+ath10k_wmi_10_4_ext_resource_config(struct ath10k *ar,
+ enum wmi_host_platform_type type,
+ u32 fw_feature_bitmap)
+{
+ struct wmi_ext_resource_config_10_4_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_ext_resource_config_10_4_cmd *)skb->data;
+ cmd->host_platform_config = __cpu_to_le32(type);
+ cmd->fw_feature_bitmap = __cpu_to_le32(fw_feature_bitmap);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI,
+ "wmi ext resource config host type %d firmware feature bitmap %08x\n",
+ type, fw_feature_bitmap);
+ return skb;
+}
+
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
@@ -7690,6 +7819,7 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.gen_init = ath10k_wmi_10_2_op_gen_init,
.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
+ .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
/* shared with 10.1 */
.map_svc = wmi_10x_svc_map,
@@ -7810,16 +7940,18 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
+ .ext_resource_config = ath10k_wmi_10_4_ext_resource_config,
/* shared with 10.2 */
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
.gen_pdev_get_temperature = ath10k_wmi_10_2_op_gen_pdev_get_temperature,
.get_vdev_subtype = ath10k_wmi_10_4_op_get_vdev_subtype,
+ .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
};
int ath10k_wmi_attach(struct ath10k *ar)
{
- switch (ar->wmi.op_version) {
+ switch (ar->running_fw->fw_file.wmi_op_version) {
case ATH10K_FW_WMI_OP_VERSION_10_4:
ar->wmi.ops = &wmi_10_4_ops;
ar->wmi.cmd = &wmi_10_4_cmd_map;
@@ -7861,7 +7993,7 @@ int ath10k_wmi_attach(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_UNSET:
case ATH10K_FW_WMI_OP_VERSION_MAX:
ath10k_err(ar, "unsupported WMI op version: %d\n",
- ar->wmi.op_version);
+ ar->running_fw->fw_file.wmi_op_version);
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 4d3cbc44f..9fdf47ea2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -180,6 +180,9 @@ enum wmi_service {
WMI_SERVICE_MESH_NON_11S,
WMI_SERVICE_PEER_STATS,
WMI_SERVICE_RESTRT_CHNL_SUPPORT,
+ WMI_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_SERVICE_TX_MODE_DYNAMIC,
/* keep last */
WMI_SERVICE_MAX,
@@ -302,6 +305,9 @@ enum wmi_10_4_service {
WMI_10_4_SERVICE_RESTRT_CHNL_SUPPORT,
WMI_10_4_SERVICE_PEER_STATS,
WMI_10_4_SERVICE_MESH_11S,
+ WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
};
static inline char *wmi_service_name(int service_id)
@@ -396,6 +402,9 @@ static inline char *wmi_service_name(int service_id)
SVCSTR(WMI_SERVICE_MESH_NON_11S);
SVCSTR(WMI_SERVICE_PEER_STATS);
SVCSTR(WMI_SERVICE_RESTRT_CHNL_SUPPORT);
+ SVCSTR(WMI_SERVICE_TX_MODE_PUSH_ONLY);
+ SVCSTR(WMI_SERVICE_TX_MODE_PUSH_PULL);
+ SVCSTR(WMI_SERVICE_TX_MODE_DYNAMIC);
default:
return NULL;
}
@@ -405,8 +414,8 @@ static inline char *wmi_service_name(int service_id)
#define WMI_SERVICE_IS_ENABLED(wmi_svc_bmap, svc_id, len) \
((svc_id) < (len) && \
- __le32_to_cpu((wmi_svc_bmap)[(svc_id)/(sizeof(u32))]) & \
- BIT((svc_id)%(sizeof(u32))))
+ __le32_to_cpu((wmi_svc_bmap)[(svc_id) / (sizeof(u32))]) & \
+ BIT((svc_id) % (sizeof(u32))))
#define SVCMAP(x, y, len) \
do { \
@@ -643,6 +652,12 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out,
WMI_SERVICE_PEER_STATS, len);
SVCMAP(WMI_10_4_SERVICE_MESH_11S,
WMI_SERVICE_MESH_11S, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_ONLY,
+ WMI_SERVICE_TX_MODE_PUSH_ONLY, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_PUSH_PULL,
+ WMI_SERVICE_TX_MODE_PUSH_PULL, len);
+ SVCMAP(WMI_10_4_SERVICE_TX_MODE_DYNAMIC,
+ WMI_SERVICE_TX_MODE_DYNAMIC, len);
}
#undef SVCMAP
@@ -816,6 +831,7 @@ struct wmi_cmd_map {
u32 set_cca_params_cmdid;
u32 pdev_bss_chan_info_request_cmdid;
u32 pdev_enable_adaptive_cca_cmdid;
+ u32 ext_resource_cfg_cmdid;
};
/*
@@ -1308,7 +1324,7 @@ enum wmi_10x_event_id {
WMI_10X_PDEV_TPC_CONFIG_EVENTID,
WMI_10X_GPIO_INPUT_EVENTID,
- WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
+ WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID - 1,
};
enum wmi_10_2_cmd_id {
@@ -1428,6 +1444,7 @@ enum wmi_10_2_cmd_id {
WMI_10_2_MU_CAL_START_CMDID,
WMI_10_2_SET_LTEU_CONFIG_CMDID,
WMI_10_2_SET_CCA_PARAMS,
+ WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
};
@@ -1471,6 +1488,8 @@ enum wmi_10_2_event_id {
WMI_10_2_WDS_PEER_EVENTID,
WMI_10_2_PEER_STA_PS_STATECHG_EVENTID,
WMI_10_2_PDEV_TEMPERATURE_EVENTID,
+ WMI_10_2_MU_REPORT_EVENTID,
+ WMI_10_2_PDEV_BSS_CHAN_INFO_EVENTID,
WMI_10_2_PDEV_UTF_EVENTID = WMI_10_2_END_EVENTID - 1,
};
@@ -1779,6 +1798,7 @@ struct wmi_channel {
__le32 reginfo1;
struct {
u8 antenna_max;
+ u8 max_tx_power;
} __packed;
} __packed;
} __packed;
@@ -2041,8 +2061,8 @@ struct wmi_10x_service_ready_event {
struct wlan_host_mem_req mem_reqs[0];
} __packed;
-#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
-#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
+#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
+#define WMI_UNIFIED_READY_TIMEOUT_HZ (5 * HZ)
struct wmi_ready_event {
__le32 sw_version;
@@ -2434,6 +2454,7 @@ enum wmi_10_2_feature_mask {
WMI_10_2_RX_BATCH_MODE = BIT(0),
WMI_10_2_ATF_CONFIG = BIT(1),
WMI_10_2_COEX_GPIO = BIT(3),
+ WMI_10_2_BSS_CHAN_INFO = BIT(6),
WMI_10_2_PEER_STATS = BIT(7),
};
@@ -2660,13 +2681,43 @@ struct wmi_resource_config_10_4 {
*/
__le32 iphdr_pad_config;
- /* qwrap configuration
+ /* qwrap configuration (bits 15-0)
* 1 - This is qwrap configuration
* 0 - This is not qwrap
+ *
+ * Bits 31-16 are alloc_frag_desc_for_data_pkt (1 enables, 0 disables)
+ * In order to get ack-RSSI reporting and to specify the tx-rate for
+ * individual frames, this option must be enabled. This uses an extra
+ * 4 bytes per tx-msdu descriptor, so don't enable it unless you need it.
*/
__le32 qwrap_config;
} __packed;
+/**
+ * enum wmi_10_4_feature_mask - WMI 10.4 feature enable/disable flags
+ * @WMI_10_4_LTEU_SUPPORT: LTEU config
+ * @WMI_10_4_COEX_GPIO_SUPPORT: COEX GPIO config
+ * @WMI_10_4_AUX_RADIO_SPECTRAL_INTF: AUX Radio Enhancement for spectral scan
+ * @WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF: AUX Radio Enhancement for chan load scan
+ * @WMI_10_4_BSS_CHANNEL_INFO_64: BSS channel info stats
+ * @WMI_10_4_PEER_STATS: Per station stats
+ */
+enum wmi_10_4_feature_mask {
+ WMI_10_4_LTEU_SUPPORT = BIT(0),
+ WMI_10_4_COEX_GPIO_SUPPORT = BIT(1),
+ WMI_10_4_AUX_RADIO_SPECTRAL_INTF = BIT(2),
+ WMI_10_4_AUX_RADIO_CHAN_LOAD_INTF = BIT(3),
+ WMI_10_4_BSS_CHANNEL_INFO_64 = BIT(4),
+ WMI_10_4_PEER_STATS = BIT(5),
+};
+
+struct wmi_ext_resource_config_10_4_cmd {
+ /* contains enum wmi_host_platform_type */
+ __le32 host_platform_config;
+ /* see enum wmi_10_4_feature_mask */
+ __le32 fw_feature_bitmap;
+};
+
/* structure describing a host memory chunk. */
struct host_memory_chunk {
/* id of the request that is passed up in service ready */
@@ -3037,11 +3088,17 @@ struct wmi_10_4_mgmt_rx_event {
u8 buf[0];
} __packed;
+struct wmi_mgmt_rx_ext_info {
+ __le64 rx_mac_timestamp;
+} __packed __aligned(4);
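+
+/* Mirrors the RX parsing above: the 8-byte timestamp trailer follows a
+ * payload padded only to 4 bytes, so the struct is pinned to 4-byte
+ * alignment rather than the natural 8-byte alignment of __le64.
+ */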
+
#define WMI_RX_STATUS_OK 0x00
#define WMI_RX_STATUS_ERR_CRC 0x01
#define WMI_RX_STATUS_ERR_DECRYPT 0x08
#define WMI_RX_STATUS_ERR_MIC 0x10
#define WMI_RX_STATUS_ERR_KEY_CACHE_MISS 0x20
+/* Extension data at the end of mgmt frame */
+#define WMI_RX_STATUS_EXT_INFO 0x40
#define PHY_ERROR_GEN_SPECTRAL_SCAN 0x26
#define PHY_ERROR_GEN_FALSE_RADAR_EXT 0x24
@@ -4072,6 +4129,13 @@ enum wmi_stats_id {
WMI_STAT_VDEV_RATE = BIT(5),
};
+enum wmi_10_4_stats_id {
+ WMI_10_4_STAT_PEER = BIT(0),
+ WMI_10_4_STAT_AP = BIT(1),
+ WMI_10_4_STAT_INST = BIT(2),
+ WMI_10_4_STAT_PEER_EXTD = BIT(3),
+};
+
struct wlan_inst_rssi_args {
__le16 cfg_retry_count;
__le16 retry_count;
@@ -4271,6 +4335,15 @@ struct wmi_10_4_peer_stats {
__le32 peer_rssi_changed;
} __packed;
+struct wmi_10_4_peer_extd_stats {
+ struct wmi_10_4_peer_stats common;
+ struct wmi_mac_addr peer_macaddr;
+ __le32 inactive_time;
+ __le32 peer_chain_rssi;
+ __le32 rx_duration;
+ __le32 reserved[10];
+} __packed;
+
struct wmi_10_2_pdev_ext_stats {
__le32 rx_rssi_comb;
__le32 rx_rssi[4];
@@ -4336,14 +4409,14 @@ enum wmi_vdev_subtype_10_4 {
/*
* Indicates that the AP VDEV uses a hidden SSID. Only valid for
* AP/GO. */
-#define WMI_VDEV_START_HIDDEN_SSID (1<<0)
+#define WMI_VDEV_START_HIDDEN_SSID (1 << 0)
/*
* Indicates if robust management frame/management frame
* protection is enabled. For GO/AP vdevs, it indicates that
* it may support station/client associations with RMF enabled.
* For STA/client vdevs, it indicates that sta will
* associate with AP with RMF enabled. */
-#define WMI_VDEV_START_PMF_ENABLED (1<<1)
+#define WMI_VDEV_START_PMF_ENABLED (1 << 1)
struct wmi_p2p_noa_descriptor {
__le32 type_count; /* 255: continuous schedule, 0: reserved */
@@ -4582,6 +4655,7 @@ struct wmi_vdev_param_map {
u32 meru_vc;
u32 rx_decap_type;
u32 bw_nss_ratemask;
+ u32 set_tsf;
};
#define WMI_VDEV_PARAM_UNSUPPORTED 0
@@ -4838,6 +4912,7 @@ enum wmi_10x_vdev_param {
WMI_10X_VDEV_PARAM_RTS_FIXED_RATE,
WMI_10X_VDEV_PARAM_VHT_SGIMASK,
WMI_10X_VDEV_PARAM_VHT80_RATEMASK,
+ WMI_10X_VDEV_PARAM_TSF_INCREMENT,
};
enum wmi_10_4_vdev_param {
@@ -4907,6 +4982,12 @@ enum wmi_10_4_vdev_param {
WMI_10_4_VDEV_PARAM_MERU_VC,
WMI_10_4_VDEV_PARAM_RX_DECAP_TYPE,
WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK,
+ WMI_10_4_VDEV_PARAM_SENSOR_AP,
+ WMI_10_4_VDEV_PARAM_BEACON_RATE,
+ WMI_10_4_VDEV_PARAM_DTIM_ENABLE_CTS,
+ WMI_10_4_VDEV_PARAM_STA_KICKOUT,
+ WMI_10_4_VDEV_PARAM_CAPABILITIES,
+ WMI_10_4_VDEV_PARAM_TSF_INCREMENT,
};
#define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0)
@@ -5281,7 +5362,7 @@ enum wmi_sta_ps_param_pspoll_count {
#define WMI_UAPSD_AC_TYPE_TRIG 1
#define WMI_UAPSD_AC_BIT_MASK(ac, type) \
- ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1<<(ac<<1)) : (1<<((ac<<1)+1)))
+ ((type == WMI_UAPSD_AC_TYPE_DELI) ? (1 << (ac << 1)) : (1 << ((ac << 1) + 1)))
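+
+/* Each access category owns two adjacent bits: delivery-enable at bit
+ * (2 * ac) and trigger-enable at bit (2 * ac + 1). For example,
+ * WMI_UAPSD_AC_BIT_MASK(2, WMI_UAPSD_AC_TYPE_TRIG) evaluates to
+ * 1 << 5, the trigger bit of AC 2.
+ */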
enum wmi_sta_ps_param_uapsd {
WMI_STA_PS_UAPSD_AC0_DELIVERY_EN = (1 << 0),
@@ -5696,7 +5777,7 @@ struct wmi_rate_set {
* the rates are filled from least significant byte to most
* significant byte.
*/
- __le32 rates[(MAX_SUPPORTED_RATES/4)+1];
+ __le32 rates[(MAX_SUPPORTED_RATES / 4) + 1];
} __packed;
struct wmi_rate_set_arg {
@@ -6116,6 +6197,7 @@ struct wmi_mgmt_rx_ev_arg {
__le32 phy_mode;
__le32 buf_len;
__le32 status; /* %WMI_RX_STATUS_ */
+ struct wmi_mgmt_rx_ext_info ext_info;
};
struct wmi_ch_info_ev_arg {
@@ -6203,6 +6285,17 @@ struct wmi_pdev_temperature_event {
__le32 temperature;
} __packed;
+struct wmi_pdev_bss_chan_info_event {
+ __le32 freq;
+ __le32 noise_floor;
+ __le64 cycle_busy;
+ __le64 cycle_total;
+ __le64 cycle_tx;
+ __le64 cycle_rx;
+ __le64 cycle_rx_bss;
+ __le32 reserved;
+} __packed;
+
/* WOW structures */
enum wmi_wow_wakeup_event {
WOW_BMISS_EVENT = 0,
@@ -6401,6 +6494,21 @@ struct wmi_pdev_set_adaptive_cca_params {
__le32 cca_detect_margin;
} __packed;
+enum wmi_host_platform_type {
+ WMI_HOST_PLATFORM_HIGH_PERF,
+ WMI_HOST_PLATFORM_LOW_PERF,
+};
+
+enum wmi_bss_survey_req_type {
+ WMI_BSS_SURVEY_REQ_TYPE_READ = 1,
+ WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR,
+};
+
+struct wmi_pdev_chan_info_req_cmd {
+ __le32 type;
+ __le32 reserved;
+} __packed;
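+
+/* Judging by the names, WMI_BSS_SURVEY_REQ_TYPE_READ reports the
+ * accumulated counters as-is, while READ_CLEAR presumably also resets
+ * them in firmware after reporting.
+ */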
+
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
index 8e02b3819..77100d42f 100644
--- a/drivers/net/wireless/ath/ath10k/wow.c
+++ b/drivers/net/wireless/ath/ath10k/wow.c
@@ -233,7 +233,7 @@ int ath10k_wow_op_suspend(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
- ar->fw_features))) {
+ ar->running_fw->fw_file.fw_features))) {
ret = 1;
goto exit;
}
@@ -285,7 +285,7 @@ int ath10k_wow_op_resume(struct ieee80211_hw *hw)
mutex_lock(&ar->conf_mutex);
if (WARN_ON(!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
- ar->fw_features))) {
+ ar->running_fw->fw_file.fw_features))) {
ret = 1;
goto exit;
}
@@ -325,7 +325,8 @@ exit:
int ath10k_wow_init(struct ath10k *ar)
{
- if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT, ar->fw_features))
+ if (!test_bit(ATH10K_FW_FEATURE_WOWLAN_SUPPORT,
+ ar->running_fw->fw_file.fw_features))
return 0;
if (WARN_ON(!test_bit(WMI_SERVICE_WOW, ar->wmi.svc_map)))
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index 38be2702c..0624333f5 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -279,7 +279,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
- } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
+ } else if (ah->ah_current_channel->band == NL80211_BAND_2GHZ) {
/* beacon RSSI is low. In B/G mode turn off OFDM weak signal
* detection and zero the firstep level to maximize CCK sensitivity */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ba12f7f40..67fedb61f 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1265,10 +1265,10 @@ struct ath5k_hw {
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
struct ieee80211_hw *hw; /* IEEE 802.11 common */
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
struct ieee80211_channel channels[ATH_CHAN_MAX];
- struct ieee80211_rate rates[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
- s8 rate_idx[IEEE80211_NUM_BANDS][AR5K_MAX_RATES];
+ struct ieee80211_rate rates[NUM_NL80211_BANDS][AR5K_MAX_RATES];
+ s8 rate_idx[NUM_NL80211_BANDS][AR5K_MAX_RATES];
enum nl80211_iftype opmode;
#ifdef CONFIG_ATH5K_DEBUG
@@ -1532,7 +1532,7 @@ int ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
/* Protocol Control Unit Functions */
/* Helpers */
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
+int ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
int len, struct ieee80211_rate *rate, bool shortpre);
unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
@@ -1611,7 +1611,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
/* PHY functions */
/* Misc PHY functions */
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band);
+u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band);
int ath5k_hw_phy_disable(struct ath5k_hw *ah);
/* Gain_F optimization */
enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 66b636615..233054bd6 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -152,7 +152,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
0xffffffff;
ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_5GHZ);
/* Try to identify radio chip based on its srev */
switch (ah->ah_radio_5ghz_revision & 0xf0) {
@@ -160,14 +160,14 @@ int ath5k_hw_init(struct ath5k_hw *ah)
ah->ah_radio = AR5K_RF5111;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
break;
case AR5K_SREV_RAD_5112:
case AR5K_SREV_RAD_2112:
ah->ah_radio = AR5K_RF5112;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
break;
case AR5K_SREV_RAD_2413:
ah->ah_radio = AR5K_RF2413;
@@ -204,7 +204,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
ah->ah_radio = AR5K_RF5111;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
} else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3d946d8b2..d98fd421c 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -268,15 +268,15 @@ static void ath5k_reg_notifier(struct wiphy *wiphy,
* Returns true for the channel numbers used.
*/
#ifdef CONFIG_ATH5K_TEST_CHANNELS
-static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
{
return true;
}
#else
-static bool ath5k_is_standard_channel(short chan, enum ieee80211_band band)
+static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
{
- if (band == IEEE80211_BAND_2GHZ && chan <= 14)
+ if (band == NL80211_BAND_2GHZ && chan <= 14)
return true;
return /* UNII 1,2 */
@@ -297,18 +297,18 @@ ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
unsigned int mode, unsigned int max)
{
unsigned int count, size, freq, ch;
- enum ieee80211_band band;
+ enum nl80211_band band;
switch (mode) {
case AR5K_MODE_11A:
/* 1..220, but 2GHz frequencies are filtered by check_channel */
size = 220;
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
break;
case AR5K_MODE_11B:
case AR5K_MODE_11G:
size = 26;
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
break;
default:
ATH5K_WARN(ah, "bad mode, not copying channels\n");
@@ -363,13 +363,13 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
int max_c, count_c = 0;
int i;
- BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < IEEE80211_NUM_BANDS);
+ BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < NUM_NL80211_BANDS);
max_c = ARRAY_SIZE(ah->channels);
/* 2GHz band */
- sband = &ah->sbands[IEEE80211_BAND_2GHZ];
- sband->band = IEEE80211_BAND_2GHZ;
- sband->bitrates = &ah->rates[IEEE80211_BAND_2GHZ][0];
+ sband = &ah->sbands[NL80211_BAND_2GHZ];
+ sband->band = NL80211_BAND_2GHZ;
+ sband->bitrates = &ah->rates[NL80211_BAND_2GHZ][0];
if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
/* G mode */
@@ -381,7 +381,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11G, max_c);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
count_c = sband->n_channels;
max_c -= count_c;
} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
@@ -407,7 +407,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11B, max_c);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
count_c = sband->n_channels;
max_c -= count_c;
}
@@ -415,9 +415,9 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
/* 5GHz band, A mode */
if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
- sband = &ah->sbands[IEEE80211_BAND_5GHZ];
- sband->band = IEEE80211_BAND_5GHZ;
- sband->bitrates = &ah->rates[IEEE80211_BAND_5GHZ][0];
+ sband = &ah->sbands[NL80211_BAND_5GHZ];
+ sband->band = NL80211_BAND_5GHZ;
+ sband->bitrates = &ah->rates[NL80211_BAND_5GHZ][0];
memcpy(sband->bitrates, &ath5k_rates[4],
sizeof(struct ieee80211_rate) * 8);
@@ -427,7 +427,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11A, max_c);
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
}
ath5k_setup_rate_idx(ah, sband);
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 654a1e33f..929d7ccc0 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -1043,14 +1043,14 @@ ath5k_debug_dump_bands(struct ath5k_hw *ah)
BUG_ON(!ah->sbands);
- for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+ for (b = 0; b < NUM_NL80211_BANDS; b++) {
struct ieee80211_supported_band *band = &ah->sbands[b];
char bname[6];
switch (band->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
strcpy(bname, "2 GHz");
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
strcpy(bname, "5 GHz");
break;
default:
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index bf29da5e9..fc47b7098 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -110,7 +110,7 @@ static const unsigned int ack_rates_high[] =
* bwmodes.
*/
int
-ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
int len, struct ieee80211_rate *rate, bool shortpre)
{
int sifs, preamble, plcp_bits, sym_time;
@@ -221,7 +221,7 @@ ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
case AR5K_BWMODE_DEFAULT:
sifs = AR5K_INIT_SIFS_DEFAULT_BG;
default:
- if (channel->band == IEEE80211_BAND_5GHZ)
+ if (channel->band == NL80211_BAND_5GHZ)
sifs = AR5K_INIT_SIFS_DEFAULT_A;
break;
}
@@ -279,7 +279,7 @@ ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
struct ieee80211_rate *rate;
unsigned int i;
/* 802.11g covers both OFDM and CCK */
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
/* Write rate duration table */
for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 0fce1c766..641b13a27 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -75,13 +75,13 @@
/**
* ath5k_hw_radio_revision() - Get the PHY Chip revision
* @ah: The &struct ath5k_hw
- * @band: One of enum ieee80211_band
+ * @band: One of enum nl80211_band
*
* Returns the revision number of a 2GHz, 5GHz or single chip
* radio.
*/
u16
-ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band)
{
unsigned int i;
u32 srev;
@@ -91,10 +91,10 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
* Set the radio chip access register
*/
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
break;
default:
@@ -138,11 +138,11 @@ ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
u16 freq = channel->center_freq;
/* Check if the channel is in our supported range */
- if (channel->band == IEEE80211_BAND_2GHZ) {
+ if (channel->band == NL80211_BAND_2GHZ) {
if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
(freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
return true;
- } else if (channel->band == IEEE80211_BAND_5GHZ)
+ } else if (channel->band == NL80211_BAND_5GHZ)
if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
(freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
return true;
@@ -743,7 +743,7 @@ done:
/**
* ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
* @ah: The &struct ath5k_hw
- * @band: One of enum ieee80211_band
+ * @band: One of enum nl80211_band
*
* Write initial RF gain table to set the RF sensitivity.
*
@@ -751,7 +751,7 @@ done:
* with Gain_F calibration
*/
static int
-ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum nl80211_band band)
{
const struct ath5k_ini_rfgain *ath5k_rfg;
unsigned int i, size, index;
@@ -786,7 +786,7 @@ ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
return -EINVAL;
}
- index = (band == IEEE80211_BAND_2GHZ) ? 1 : 0;
+ index = (band == NL80211_BAND_2GHZ) ? 1 : 0;
for (i = 0; i < size; i++) {
AR5K_REG_WAIT(i);
@@ -917,7 +917,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
}
/* Set Output and Driver bias current (OB/DB) */
- if (channel->band == IEEE80211_BAND_2GHZ) {
+ if (channel->band == NL80211_BAND_2GHZ) {
if (channel->hw_value == AR5K_MODE_11B)
ee_mode = AR5K_EEPROM_MODE_11B;
@@ -944,7 +944,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
AR5K_RF_DB_2GHZ, true);
/* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */
- } else if ((channel->band == IEEE80211_BAND_5GHZ) ||
+ } else if ((channel->band == NL80211_BAND_5GHZ) ||
(ah->ah_radio == AR5K_RF5111)) {
/* For 11a, Turbo and XR we need to choose
@@ -1145,7 +1145,7 @@ ath5k_hw_rfregs_init(struct ath5k_hw *ah,
}
if (ah->ah_radio == AR5K_RF5413 &&
- channel->band == IEEE80211_BAND_2GHZ) {
+ channel->band == NL80211_BAND_2GHZ) {
ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE,
true);
@@ -1270,7 +1270,7 @@ ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
*/
data0 = data1 = 0;
- if (channel->band == IEEE80211_BAND_2GHZ) {
+ if (channel->band == NL80211_BAND_2GHZ) {
/* Map 2GHz channel to 5GHz Atheros channel ID */
ret = ath5k_hw_rf5111_chan2athchan(
ieee80211_frequency_to_channel(channel->center_freq),
@@ -1446,7 +1446,7 @@ ath5k_hw_channel(struct ath5k_hw *ah,
"channel frequency (%u MHz) out of supported "
"band range\n",
channel->center_freq);
- return -EINVAL;
+ return -EINVAL;
}
/*
@@ -1919,7 +1919,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
/* Convert current frequency to fbin value (the same way channels
* are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale
* up by 2 so we can compare it later */
- if (channel->band == IEEE80211_BAND_2GHZ) {
+ if (channel->band == NL80211_BAND_2GHZ) {
chan_fbin = (channel->center_freq - 2300) * 10;
freq_band = AR5K_EEPROM_BAND_2GHZ;
} else {
@@ -1983,7 +1983,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
break;
default:
- if (channel->band == IEEE80211_BAND_5GHZ) {
+ if (channel->band == NL80211_BAND_5GHZ) {
/* Both sample_freq and chip_freq are 40MHz */
spur_delta_phase = (spur_offset << 17) / 25;
spur_freq_sigma_delta =
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index ddaad712c..beda11ce3 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -559,7 +559,7 @@ ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
@@ -596,10 +596,10 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
*
* Also we have different lowest rate for 802.11a
*/
- if (channel->band == IEEE80211_BAND_5GHZ)
- band = IEEE80211_BAND_5GHZ;
+ if (channel->band == NL80211_BAND_5GHZ)
+ band = NL80211_BAND_5GHZ;
else
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
switch (ah->ah_bwmode) {
case AR5K_BWMODE_5MHZ:
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 99e62f99a..56d7925a0 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -634,7 +634,7 @@ ath5k_hw_on_hold(struct ath5k_hw *ah)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- usleep_range(2000, 2500);
+ usleep_range(2000, 2500);
} else {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -699,7 +699,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- usleep_range(2000, 2500);
+ usleep_range(2000, 2500);
} else {
if (ath5k_get_bus_type(ah) == ATH_AHB)
ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -752,7 +752,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
clock = AR5K_PHY_PLL_RF5111; /*Zero*/
}
- if (channel->band == IEEE80211_BAND_2GHZ) {
+ if (channel->band == NL80211_BAND_2GHZ) {
mode |= AR5K_PHY_MODE_FREQ_2GHZ;
clock |= AR5K_PHY_PLL_44MHZ;
@@ -771,7 +771,7 @@ ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
else
mode |= AR5K_PHY_MODE_MOD_DYN;
}
- } else if (channel->band == IEEE80211_BAND_5GHZ) {
+ } else if (channel->band == NL80211_BAND_5GHZ) {
mode |= (AR5K_PHY_MODE_FREQ_5GHZ |
AR5K_PHY_MODE_MOD_OFDM);
@@ -906,7 +906,7 @@ ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
u32 data;
ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
AR5K_PHY_CCKTXCTL);
- if (channel->band == IEEE80211_BAND_5GHZ)
+ if (channel->band == NL80211_BAND_5GHZ)
data = 0xffb81020;
else
data = 0xffb80d20;
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index 7f3f94fbf..4e11ba06f 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -34,7 +34,7 @@
}
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.hw_value = (_channel), \
.center_freq = (_freq), \
.flags = (_flags), \
@@ -43,7 +43,7 @@
}
#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.hw_value = (_channel), \
.center_freq = 5000 + (5 * (_channel)), \
.flags = (_flags), \
@@ -2583,7 +2583,7 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
}
#endif
-static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
+static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum nl80211_band band,
bool ht_enable)
{
struct ath6kl_htcap *htcap = &vif->htcap[band];
@@ -2594,7 +2594,7 @@ static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
if (ht_enable) {
/* Set default ht capabilities */
htcap->ht_enable = true;
- htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ?
+ htcap->cap_info = (band == NL80211_BAND_2GHZ) ?
ath6kl_g_htcap : ath6kl_a_htcap;
htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
} else /* Disable ht */
@@ -2609,7 +2609,7 @@ static int ath6kl_restore_htcap(struct ath6kl_vif *vif)
struct wiphy *wiphy = vif->ar->wiphy;
int band, ret = 0;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!wiphy->bands[band])
continue;
@@ -3530,7 +3530,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ath6kl *ar = wiphy_priv(wiphy);
- u32 rates[IEEE80211_NUM_BANDS];
+ u32 rates[NUM_NL80211_BANDS];
int ret, i;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
@@ -3555,7 +3555,7 @@ static void ath6kl_cfg80211_reg_notify(struct wiphy *wiphy,
* changed.
*/
- for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
if (wiphy->bands[i])
rates[i] = (1 << wiphy->bands[i]->n_bitrates) - 1;
@@ -3791,8 +3791,8 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
vif->bg_scan_period = 0;
- vif->htcap[IEEE80211_BAND_2GHZ].ht_enable = true;
- vif->htcap[IEEE80211_BAND_5GHZ].ht_enable = true;
+ vif->htcap[NL80211_BAND_2GHZ].ht_enable = true;
+ vif->htcap[NL80211_BAND_5GHZ].ht_enable = true;
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
if (fw_vif_idx != 0) {
@@ -3943,9 +3943,9 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
wiphy->available_antennas_rx = ar->hw.rx_ant;
if (band_2gig)
- wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ wiphy->bands[NL80211_BAND_2GHZ] = &ath6kl_band_2ghz;
if (band_5gig)
- wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ wiphy->bands[NL80211_BAND_5GHZ] = &ath6kl_band_5ghz;
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
diff --git a/drivers/net/wireless/ath/ath6kl/core.c b/drivers/net/wireless/ath/ath6kl/core.c
index 4ec02cea0..ebb9f1637 100644
--- a/drivers/net/wireless/ath/ath6kl/core.c
+++ b/drivers/net/wireless/ath/ath6kl/core.c
@@ -31,6 +31,7 @@ unsigned int debug_mask;
static unsigned int suspend_mode;
static unsigned int wow_mode;
static unsigned int uart_debug;
+static unsigned int uart_rate = 115200;
static unsigned int ath6kl_p2p;
static unsigned int testmode;
static unsigned int recovery_enable;
@@ -40,6 +41,7 @@ module_param(debug_mask, uint, 0644);
module_param(suspend_mode, uint, 0644);
module_param(wow_mode, uint, 0644);
module_param(uart_debug, uint, 0644);
+module_param(uart_rate, uint, 0644);
module_param(ath6kl_p2p, uint, 0644);
module_param(testmode, uint, 0644);
module_param(recovery_enable, uint, 0644);
@@ -180,6 +182,7 @@ int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
if (uart_debug)
ar->conf_flags |= ATH6KL_CONF_UART_DEBUG;
+ ar->hw.uarttx_rate = uart_rate;
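+ /* uart_rate defaults to 115200 via the module parameter above; it
+ * is only pushed to the target's hi_desired_baud_rate when UART
+ * debug is enabled, see ath6kl_configure_target().
+ */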
set_bit(FIRST_BOOT, &ar->flag);
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 6e770b78f..7067b87d0 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -623,7 +623,7 @@ struct ath6kl_vif {
struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
struct aggr_info *aggr_cntxt;
- struct ath6kl_htcap htcap[IEEE80211_NUM_BANDS];
+ struct ath6kl_htcap htcap[NUM_NL80211_BANDS];
struct timer_list disconnect_timer;
struct timer_list sched_scan_timer;
@@ -781,6 +781,7 @@ struct ath6kl {
u32 board_addr;
u32 refclk_hz;
u32 uarttx_pin;
+ u32 uarttx_rate;
u32 testscript_addr;
u8 tx_ant;
u8 rx_ant;
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index e508217a6..0d140a774 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -173,6 +173,7 @@ static const struct ath6kl_hw hw_list[] = {
.reserved_ram_size = 7168,
.board_addr = 0x436400,
.testscript_addr = 0,
+ .uarttx_pin = 11,
.flags = 0,
.fw = {
@@ -650,6 +651,14 @@ int ath6kl_configure_target(struct ath6kl *ar)
if (status)
return status;
+ /* Only set the baud rate if we're actually doing debug */
+ if (ar->conf_flags & ATH6KL_CONF_UART_DEBUG) {
+ status = ath6kl_bmi_write_hi32(ar, hi_desired_baud_rate,
+ ar->hw.uarttx_rate);
+ if (status)
+ return status;
+ }
+
/* Configure target refclk_hz */
if (ar->hw.refclk_hz != 0) {
status = ath6kl_bmi_write_hi32(ar, hi_refclk_hz,
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index a5e1de75a..631c3a0c5 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -1584,6 +1584,11 @@ static int ath6kl_wmi_txe_notify_event_rx(struct wmi *wmi, u8 *datap, int len,
if (len < sizeof(*ev))
return -EINVAL;
+ if (vif->nw_type != INFRA_NETWORK ||
+ !test_bit(ATH6KL_FW_CAPABILITY_TX_ERR_NOTIFY,
+ vif->ar->fw_capabilities))
+ return -EOPNOTSUPP;
+
if (vif->sme_state != SME_CONNECTED)
return -ENOTCONN;
@@ -2043,7 +2048,7 @@ int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
sc->no_cck = cpu_to_le32(no_cck);
sc->num_ch = num_chan;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
sband = ar->wiphy->bands[band];
if (!sband)
@@ -2765,10 +2770,10 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
memset(&ratemask, 0, sizeof(ratemask));
/* only check 2.4 and 5 GHz bands, skip the rest */
- for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
+ for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
/* copy legacy rate mask */
ratemask[band] = mask->control[band].legacy;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
ratemask[band] =
mask->control[band].legacy << 4;
@@ -2794,9 +2799,9 @@ static int ath6kl_set_bitrate_mask64(struct wmi *wmi, u8 if_idx,
if (mode == WMI_RATES_MODE_11A ||
mode == WMI_RATES_MODE_11A_HT20 ||
mode == WMI_RATES_MODE_11A_HT40)
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
else
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
cmd->ratemask[mode] = cpu_to_le64(ratemask[band]);
}
@@ -2817,10 +2822,10 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
memset(&ratemask, 0, sizeof(ratemask));
/* only check 2.4 and 5 GHz bands, skip the rest */
- for (band = 0; band <= IEEE80211_BAND_5GHZ; band++) {
+ for (band = 0; band <= NL80211_BAND_5GHZ; band++) {
/* copy legacy rate mask */
ratemask[band] = mask->control[band].legacy;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
ratemask[band] =
mask->control[band].legacy << 4;
@@ -2844,9 +2849,9 @@ static int ath6kl_set_bitrate_mask32(struct wmi *wmi, u8 if_idx,
if (mode == WMI_RATES_MODE_11A ||
mode == WMI_RATES_MODE_11A_HT20 ||
mode == WMI_RATES_MODE_11A_HT40)
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
else
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
cmd->ratemask[mode] = cpu_to_le32(ratemask[band]);
}
@@ -3169,7 +3174,7 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
}
int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ath6kl_htcap *htcap)
{
struct sk_buff *skb;
@@ -3182,7 +3187,7 @@ int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
cmd = (struct wmi_set_htcap_cmd *) skb->data;
/*
- * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely
+ * NOTE: Band in firmware matches enum nl80211_band; it is unlikely
* this will change in firmware. If the band value ever does change,
* the host needs to be fixed accordingly.
*/
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index 05d25a94c..3af464a73 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -2628,7 +2628,7 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
u8 keep_alive_intvl);
int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ath6kl_htcap *htcap);
int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 40fa915d6..f68cb0045 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -75,6 +75,26 @@ config ATH9K_STATION_STATISTICS
---help---
This option enables detailed statistics for association stations.
+config ATH9K_TX99
+ bool "Atheros ath9k TX99 testing support"
+ depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS
+ default n
+ ---help---
+ Say N. This should only be enabled on systems undergoing
+ certification testing and evaluation in a controlled environment.
+ Enabling this will only enable TX99 support, all other modes of
+ operation will be disabled.
+
+ TX99 support enables Specific Absorption Rate (SAR) testing.
+ SAR is the unit of measurement for the amount of radio frequency (RF)
+ energy absorbed by the body when using a wireless device. The RF
+ exposure limits used are expressed in terms of SAR, which is a
+ measure of the electric and magnetic field strength and power density
+ for transmitters operating at frequencies from 300 kHz to 100 GHz.
+ Regulatory bodies around the world require that wireless devices
+ be evaluated to meet the RF exposure limits set forth in
+ governmental SAR regulations.
+
config ATH9K_DFS_CERTIFIED
bool "Atheros DFS support for certified platforms"
depends on ATH9K && CFG80211_CERTIFICATION_ONUS
@@ -103,26 +123,6 @@ config ATH9K_DYNACK
based on ACK frame RX timestamp, TX frame timestamp and frame
duration
-config ATH9K_TX99
- bool "Atheros ath9k TX99 testing support"
- depends on ATH9K_DEBUGFS && CFG80211_CERTIFICATION_ONUS
- default n
- ---help---
- Say N. This should only be enabled on systems undergoing
- certification testing and evaluation in a controlled environment.
- Enabling this will only enable TX99 support, all other modes of
- operation will be disabled.
-
- TX99 support enables Specific Absorption Rate (SAR) testing.
- SAR is the unit of measurement for the amount of radio frequency(RF)
- absorbed by the body when using a wireless device. The RF exposure
- limits used are expressed in the terms of SAR, which is a measure
- of the electric and magnetic field strength and power density for
- transmitters operating at frequencies from 300 kHz to 100 GHz.
- Regulatory bodies around the world require that wireless device
- be evaluated to meet the RF exposure limits set forth in the
- governmental SAR regulations.
-
config ATH9K_WOW
bool "Wake on Wireless LAN support (EXPERIMENTAL)"
depends on ATH9K && PM
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index c38399bc9..c07866a2f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -331,7 +331,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -351,7 +351,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
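
The initvals tables touched here and in the files below are laid out as one register per row: column 0 is the register address and columns 1-4 hold per-mode values (the exact mode-to-column mapping is an assumption here, not stated in the source). A sketch of how such a [][5] table would be applied:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical consumer of a [][5] postamble table: write column
     * `mode` (1..4) of every row to the address in column 0. */
    static void write_postamble(const uint32_t (*tbl)[5], int rows, int mode)
    {
            for (int i = 0; i < rows; i++)
                    printf("REG_WRITE(0x%08x, 0x%08x)\n",
                           tbl[i][0], tbl[i][mode]);
    }

    int main(void)
    {
            const uint32_t tbl[][5] = {    /* row taken from the hunk above */
                    { 0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18 },
            };
            write_postamble(tbl, 1, 1);
            return 0;
    }
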
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 0c391997a..518e649ec 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -1203,12 +1203,12 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
{
int offset[8] = {0}, total = 0, test;
- int agc_out, i, peak_detect_threshold;
+ int agc_out, i, peak_detect_threshold = 0;
if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
peak_detect_threshold = 8;
- else
- peak_detect_threshold = 0;
+ else if (AR_SREV_9561(ah))
+ peak_detect_threshold = 11;
/*
* Turn off LNA/SW.
@@ -1249,17 +1249,14 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
- if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah) ||
- AR_SREV_9561(ah)) {
- if (is_2g)
- REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
- AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
- peak_detect_threshold);
- else
- REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
- AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
- peak_detect_threshold);
- }
+ if (is_2g)
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
+ peak_detect_threshold);
+ else
+ REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
+ AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
+ peak_detect_threshold);
for (i = 6; i > 0; i--) {
offset[i] = BIT(i - 1);
@@ -1311,9 +1308,6 @@ static void ar9003_hw_do_pcoem_manual_peak_cal(struct ath_hw *ah,
struct ath9k_hw_cal_data *caldata = ah->caldata;
int i;
- if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
- return;
-
if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
return;
@@ -1641,14 +1635,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
skip_tx_iqcal:
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
- if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah) ||
- AR_SREV_9561(ah)) {
- for (i = 0; i < AR9300_MAX_CHAINS; i++) {
- if (!(ah->rxchainmask & (1 << i)))
- continue;
- ar9003_hw_manual_peak_cal(ah, i,
- IS_CHAN_2GHZ(chan));
- }
+ for (i = 0; i < AR9300_MAX_CHAINS; i++) {
+ if (!(ah->rxchainmask & (1 << i)))
+ continue;
+
+ ar9003_hw_manual_peak_cal(ah, i,
+ IS_CHAN_2GHZ(chan));
}
/*
@@ -1709,7 +1701,7 @@ void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
struct ath_hw_ops *ops = ath9k_hw_ops(ah);
- if (AR_SREV_9485(ah) || AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ if (AR_SREV_9003_PCOEM(ah))
priv_ops->init_cal = ar9003_hw_init_cal_pcoem;
else
priv_ops->init_cal = ar9003_hw_init_cal_soc;
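
The calibration paths above lean on REG_RMW_FIELD() read-modify-write updates of single register fields. A self-contained stand-in for that pattern (the names and the mask/shift convention are assumptions, not the ath9k macros themselves):

    #include <assert.h>
    #include <stdint.h>

    /* Read-modify-write of one field: clear the masked bits, then OR in
     * the new value shifted to the field's position. */
    static uint32_t rmw_field(uint32_t reg, uint32_t mask, int shift,
                              uint32_t val)
    {
            return (reg & ~mask) | ((val << shift) & mask);
    }

    int main(void)
    {
            /* set an 8-bit field at bit 8 to 0x0b (cf. threshold 11 above) */
            assert(rmw_field(0xffffffff, 0x0000ff00, 8, 0x0b) == 0xffff0bff);
            return 0;
    }
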
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 54ed2f72d..dec1a317a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3590,8 +3590,8 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
else
gpio = AR9300_EXT_LNA_CTL_GPIO_AR9485;
- ath9k_hw_cfg_output(ah, gpio,
- AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
+ ath9k_hw_gpio_request_out(ah, gpio, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_PCIE_ATTENTION_LED);
}
value = ar9003_hw_ant_ctrl_common_get(ah, is2ghz);
@@ -4097,16 +4097,16 @@ static void ar9003_hw_thermometer_apply(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON_OVR, therm_on);
- therm_on = (thermometer < 0) ? 0 : (thermometer == 0);
+ therm_on = thermometer == 0;
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
if (pCap->chip_chainmask & BIT(1)) {
- therm_on = (thermometer < 0) ? 0 : (thermometer == 1);
+ therm_on = thermometer == 1;
REG_RMW_FIELD(ah, AR_PHY_65NM_CH1_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
}
if (pCap->chip_chainmask & BIT(2)) {
- therm_on = (thermometer < 0) ? 0 : (thermometer == 2);
+ therm_on = thermometer == 2;
REG_RMW_FIELD(ah, AR_PHY_65NM_CH2_RXTX4,
AR_PHY_65NM_CH0_RXTX4_THERM_ON, therm_on);
}
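
The therm_on simplification above drops the "(thermometer < 0) ? 0 :" guard; for a non-negative chain index the two forms agree, since a negative thermometer can never compare equal to the index. A quick exhaustive check of that equivalence:

    #include <assert.h>

    int main(void)
    {
            for (int t = -8; t <= 8; t++)
                    for (int n = 0; n <= 2; n++)
                            assert((t < 0 ? 0 : t == n) == (t == n));
            return 0;
    }
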
@@ -4402,7 +4402,7 @@ static void ar9003_hw_selfgen_tpc_txpower(struct ath_hw *ah,
}
/* Set tx power registers to array of values passed in */
-static int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
+int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray)
{
#define POW_SM(_r, _s) (((_r) & 0x3f) << (_s))
/* make sure forced gain is not set */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 694ca2e68..107bcfbbe 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -355,5 +355,6 @@ unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
struct ath9k_channel *chan);
void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
+int ar9003_hw_tx_power_regwrite(struct ath_hw *ah, u8 * pPwrArray);
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
index af5ee416a..0fe9c8378 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -427,21 +427,34 @@ static void ar9003_mci_observation_set_up(struct ath_hw *ah)
struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
- ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
- ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
- ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
- ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+ ath9k_hw_gpio_request_out(ah, 3, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+ ath9k_hw_gpio_request_out(ah, 2, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+ ath9k_hw_gpio_request_out(ah, 1, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_gpio_request_out(ah, 0, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
- ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
- ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
- ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
- ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
- ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_gpio_request_out(ah, 3, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+ ath9k_hw_gpio_request_out(ah, 2, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+ ath9k_hw_gpio_request_out(ah, 1, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_gpio_request_out(ah, 0, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_gpio_request_out(ah, 5, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
} else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
- ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
- ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
- ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
- ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+ ath9k_hw_gpio_request_out(ah, 3, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_gpio_request_out(ah, 2, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_gpio_request_out(ah, 1, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_gpio_request_out(ah, 0, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
} else
return;
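
Throughout these files the anonymous ath9k_hw_cfg_output()/ath9k_hw_cfg_gpio_input() calls become labelled ath9k_hw_gpio_request_out()/ath9k_hw_gpio_request_in() with a matching ath9k_hw_gpio_free(). A toy userspace model of that request/free discipline (not the driver implementation; the real helpers also program the output mux):

    #include <assert.h>
    #include <stddef.h>

    #define NUM_GPIO 32

    static const char *gpio_owner[NUM_GPIO];

    /* Claim a pin for output under a label; fail if already owned. */
    static int gpio_request_out(unsigned pin, const char *label)
    {
            if (pin >= NUM_GPIO || gpio_owner[pin])
                    return -1;
            gpio_owner[pin] = label ? label : "anonymous";
            return 0;
    }

    static void gpio_free(unsigned pin)
    {
            if (pin < NUM_GPIO)
                    gpio_owner[pin] = NULL;
    }

    int main(void)
    {
            assert(gpio_request_out(9, "ath9k-led") == 0);
            assert(gpio_request_out(9, "ath9k-btactive") == -1); /* busy */
            gpio_free(9);
            assert(gpio_request_out(9, "ath9k-btactive") == 0);
            return 0;
    }
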
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 06c1ca6e8..ae3043559 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -17,6 +17,7 @@
#include <linux/export.h>
#include "hw.h"
#include "ar9003_phy.h"
+#include "ar9003_eeprom.h"
#define AR9300_OFDM_RATES 8
#define AR9300_HT_SS_RATES 8
@@ -1009,7 +1010,7 @@ static void ar9003_hw_set_rfmode(struct ath_hw *ah,
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rfMode |= (AR_PHY_MODE_DYNAMIC | AR_PHY_MODE_DYN_CCK_DISABLE);
- if (rfMode & (AR_PHY_MODE_QUARTER | AR_PHY_MODE_HALF))
+ if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan))
REG_RMW_FIELD(ah, AR_PHY_FRAME_CTL,
AR_PHY_FRAME_CTL_CF_OVERLAP_WINDOW, 3);
@@ -1337,11 +1338,11 @@ skip_ws_det:
chan->channel,
aniState->mrcCCK ? "on" : "off",
is_on ? "on" : "off");
- if (is_on)
- ah->stats.ast_ani_ccklow++;
- else
- ah->stats.ast_ani_cckhigh++;
- aniState->mrcCCK = is_on;
+ if (is_on)
+ ah->stats.ast_ani_ccklow++;
+ else
+ ah->stats.ast_ani_cckhigh++;
+ aniState->mrcCCK = is_on;
}
break;
}
@@ -1840,73 +1841,14 @@ static void ar9003_hw_tx99_stop(struct ath_hw *ah)
static void ar9003_hw_tx99_set_txpower(struct ath_hw *ah, u8 txpower)
{
- static s16 p_pwr_array[ar9300RateSize] = { 0 };
+ static u8 p_pwr_array[ar9300RateSize] = { 0 };
unsigned int i;
- if (txpower <= MAX_RATE_POWER) {
- for (i = 0; i < ar9300RateSize; i++)
- p_pwr_array[i] = txpower;
- } else {
- for (i = 0; i < ar9300RateSize; i++)
- p_pwr_array[i] = MAX_RATE_POWER;
- }
+ txpower = txpower <= MAX_RATE_POWER ? txpower : MAX_RATE_POWER;
+ for (i = 0; i < ar9300RateSize; i++)
+ p_pwr_array[i] = txpower;
- REG_WRITE(ah, 0xa458, 0);
-
- REG_WRITE(ah, 0xa3c0,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
- REG_WRITE(ah, 0xa3c4,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_54], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_48], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_36], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_6_24], 0));
- REG_WRITE(ah, 0xa3c8,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
- REG_WRITE(ah, 0xa3cc,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11S], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_11L], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_5S], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_LEGACY_1L_5L], 0));
- REG_WRITE(ah, 0xa3d0,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_5], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_4], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_1_3_9_11_17_19], 8)|
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_0_8_16], 0));
- REG_WRITE(ah, 0xa3d4,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_13], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_12], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_7], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_6], 0));
- REG_WRITE(ah, 0xa3e4,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_21], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_20], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_15], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_14], 0));
- REG_WRITE(ah, 0xa3e8,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_23], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_22], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_23], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT20_22], 0));
- REG_WRITE(ah, 0xa3d8,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_5], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_4], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_1_3_9_11_17_19], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_0_8_16], 0));
- REG_WRITE(ah, 0xa3dc,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_13], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_12], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_7], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_6], 0));
- REG_WRITE(ah, 0xa3ec,
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_21], 24) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_20], 16) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_15], 8) |
- ATH9K_POW_SM(p_pwr_array[ALL_TARGET_HT40_14], 0));
+ ar9003_hw_tx_power_regwrite(ah, p_pwr_array);
}
static void ar9003_hw_init_txpower_cck(struct ath_hw *ah, u8 *rate_array)
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index 2c42ff05e..29479afbc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -40,7 +40,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -59,7 +59,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
index 2154efcd3..c4a6ffa55 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p2_initvals.h
@@ -345,7 +345,7 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00003221, 0x00003221},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -364,7 +364,7 @@ static const u32 ar9331_1p2_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
index b995ffe88..2eb163fc1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9340_initvals.h
@@ -245,7 +245,7 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -265,7 +265,7 @@ static const u32 ar9340_1p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000220, 0x00000220, 0x00000110, 0x00000110},
{0x0000a28c, 0x00011111, 0x00011111, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041983, 0x00041983, 0x00041982, 0x00041982},
{0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1b6b4d0cf..b00dd6494 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -59,7 +59,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -79,7 +79,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
index dc3adda46..0f8745ec7 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p1_initvals.h
@@ -239,7 +239,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c780, 0xcfd5c280},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -259,7 +259,7 @@ static const u32 ar9462_2p1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index ce83ce47a..bdf6f107f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1026,7 +1026,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
{0x00009e14, 0x31395d53, 0x31396053, 0x312e6053, 0x312e5d53},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
{0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
@@ -1044,7 +1044,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
{0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
{0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
index c0b90daa3..924ae6bde 100644
--- a/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar953x_initvals.h
@@ -988,7 +988,7 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1008,7 +1008,7 @@ static const u32 qca953x_2p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
index 148562add..67edf344b 100644
--- a/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar955x_1p0_initvals.h
@@ -83,7 +83,7 @@ static const u32 ar955x_1p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 10d4a6cb1..35c1bbb2f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -347,7 +347,7 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003a4, 0x000003a4},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946222, 0xcf946222, 0xcf946220, 0xcf946220},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
index c3a47eaaf..db051071c 100644
--- a/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar956x_initvals.h
@@ -220,7 +220,7 @@ static const u32 qca956x_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003a6, 0x000003a6},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcf946220, 0xcf946220},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index 5d4629f96..f4c9befb3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -1290,7 +1290,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946220, 0xcf946220},
{0x00009e44, 0x02321e27, 0x02321e27, 0x02291e27, 0x02291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
@@ -1310,7 +1310,7 @@ static const u32 ar9580_1p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000150, 0x00000150},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
- {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
+ {0x0000a2c4, 0x00058d18, 0x00058d18, 0x00058d18, 0x00058d18},
{0x0000a2d0, 0x00041983, 0x00041983, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 5294595da..93b3793cc 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -813,7 +813,6 @@ static inline int ath9k_dump_btcoex(struct ath_softc *sc, u8 *buf, u32 size)
#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
void ath_deinit_leds(struct ath_softc *sc);
-void ath_fill_led_pin(struct ath_softc *sc);
#else
static inline void ath_init_leds(struct ath_softc *sc)
{
@@ -822,9 +821,6 @@ static inline void ath_init_leds(struct ath_softc *sc)
static inline void ath_deinit_leds(struct ath_softc *sc)
{
}
-static inline void ath_fill_led_pin(struct ath_softc *sc)
-{
-}
#endif
/************************/
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 5a084d94e..618c9df35 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -15,6 +15,8 @@
*/
#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/ath9k_platform.h>
#include "hw.h"
enum ath_bt_mode {
@@ -34,6 +36,8 @@ struct ath_btcoex_config {
u8 bt_priority_time;
u8 bt_first_slot_time;
bool bt_hold_rx_clear;
+ u8 wl_active_time;
+ u8 wl_qc_time;
};
static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
@@ -65,31 +69,71 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
.bt_priority_time = 2,
.bt_first_slot_time = 5,
.bt_hold_rx_clear = true,
+ .wl_active_time = 0x20,
+ .wl_qc_time = 0x20,
};
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
+ u8 time_extend = ath_bt_config.bt_time_extend;
+ u8 first_slot_time = ath_bt_config.bt_first_slot_time;
if (AR_SREV_9300_20_OR_LATER(ah))
rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
+ if (AR_SREV_SOC(ah)) {
+ first_slot_time = 0x1d;
+ time_extend = 0xa;
+
+ btcoex_hw->bt_coex_mode3 =
+ SM(ath_bt_config.wl_active_time, AR_BT_WL_ACTIVE_TIME) |
+ SM(ath_bt_config.wl_qc_time, AR_BT_WL_QC_TIME);
+
+ btcoex_hw->bt_coex_mode2 =
+ AR_BT_PROTECT_BT_AFTER_WAKEUP |
+ AR_BT_PHY_ERR_BT_COLL_ENABLE;
+ }
+
btcoex_hw->bt_coex_mode =
(btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) |
- SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) |
+ SM(time_extend, AR_BT_TIME_EXTEND) |
SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) |
SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) |
SM(ath_bt_config.bt_mode, AR_BT_MODE) |
SM(ath_bt_config.bt_quiet_collision, AR_BT_QUIET) |
SM(rxclear_polarity, AR_BT_RX_CLEAR_POLARITY) |
SM(ath_bt_config.bt_priority_time, AR_BT_PRIORITY_TIME) |
- SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) |
+ SM(first_slot_time, AR_BT_FIRST_SLOT_TIME) |
SM(qnum, AR_BT_QCU_THRESH);
- btcoex_hw->bt_coex_mode2 =
+ btcoex_hw->bt_coex_mode2 |=
SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
AR_BT_DISABLE_BT_ANT;
}
EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
+static void ath9k_hw_btcoex_pin_init(struct ath_hw *ah, u8 wlanactive_gpio,
+ u8 btactive_gpio, u8 btpriority_gpio)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ struct ath9k_platform_data *pdata = ah->dev->platform_data;
+
+ if (btcoex_hw->scheme != ATH_BTCOEX_CFG_2WIRE &&
+ btcoex_hw->scheme != ATH_BTCOEX_CFG_3WIRE)
+ return;
+
+ /* BT priority GPIO is ignored by the 2-wire scheme */
+ if (pdata && (pdata->bt_active_pin || pdata->bt_priority_pin ||
+ pdata->wlan_active_pin)) {
+ btcoex_hw->btactive_gpio = pdata->bt_active_pin;
+ btcoex_hw->wlanactive_gpio = pdata->wlan_active_pin;
+ btcoex_hw->btpriority_gpio = pdata->bt_priority_pin;
+ } else {
+ btcoex_hw->btactive_gpio = btactive_gpio;
+ btcoex_hw->wlanactive_gpio = wlanactive_gpio;
+ btcoex_hw->btpriority_gpio = btpriority_gpio;
+ }
+}
+
void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
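
bt_coex_mode3 above is assembled with the driver's SM() shift-into-mask helper. A stand-in showing the convention (the mask value and shift below are illustrative, not the AR_BT_WL_ACTIVE_TIME definition):

    #include <assert.h>
    #include <stdint.h>

    /* SM(value, FIELD): shift value to FIELD's position (FIELD_S) and
     * truncate to the field mask (FIELD). */
    #define WL_ACTIVE_TIME   0x0000ff00u
    #define WL_ACTIVE_TIME_S 8
    #define SM(_v, _f)       ((((uint32_t)(_v)) << _f##_S) & _f)

    int main(void)
    {
            assert(SM(0x20, WL_ACTIVE_TIME) == 0x2000);
            return 0;
    }
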
@@ -107,19 +151,19 @@ void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah)
btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
} else if (AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
- btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
- btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
- btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9300;
- } else if (AR_SREV_9280_20_OR_LATER(ah)) {
- btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9280;
- btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9280;
- if (AR_SREV_9285(ah)) {
+ ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9300,
+ ATH_BTACTIVE_GPIO_9300,
+ ATH_BTPRIORITY_GPIO_9300);
+ } else if (AR_SREV_9280_20_OR_LATER(ah)) {
+ if (AR_SREV_9285(ah))
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
- btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO_9285;
- } else {
+ else
btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
- }
+
+ ath9k_hw_btcoex_pin_init(ah, ATH_WLANACTIVE_GPIO_9280,
+ ATH_BTACTIVE_GPIO_9280,
+ ATH_BTPRIORITY_GPIO_9285);
}
}
EXPORT_SYMBOL(ath9k_hw_btcoex_init_scheme);
@@ -137,12 +181,14 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
/* Set input mux for bt_active to gpio pin */
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
- AR_GPIO_INPUT_MUX1_BT_ACTIVE,
- btcoex_hw->btactive_gpio);
+ if (!AR_SREV_SOC(ah))
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+ btcoex_hw->btactive_gpio);
/* Configure the desired gpio port for input */
- ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
+ ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio,
+ "ath9k-btactive");
}
EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire);
@@ -157,21 +203,33 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
/* Set input mux for bt_prority_async and
* bt_active_async to GPIO pins */
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
- AR_GPIO_INPUT_MUX1_BT_ACTIVE,
- btcoex_hw->btactive_gpio);
-
- REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
- AR_GPIO_INPUT_MUX1_BT_PRIORITY,
- btcoex_hw->btpriority_gpio);
+ if (!AR_SREV_SOC(ah)) {
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ AR_GPIO_INPUT_MUX1_BT_ACTIVE,
+ btcoex_hw->btactive_gpio);
+ REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
+ AR_GPIO_INPUT_MUX1_BT_PRIORITY,
+ btcoex_hw->btpriority_gpio);
+ }
/* Configure the desired GPIO ports for input */
-
- ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
- ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio);
+ ath9k_hw_gpio_request_in(ah, btcoex_hw->btactive_gpio,
+ "ath9k-btactive");
+ ath9k_hw_gpio_request_in(ah, btcoex_hw->btpriority_gpio,
+ "ath9k-btpriority");
}
EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire);
+void ath9k_hw_btcoex_deinit(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+
+ ath9k_hw_gpio_free(ah, btcoex_hw->btactive_gpio);
+ ath9k_hw_gpio_free(ah, btcoex_hw->btpriority_gpio);
+ ath9k_hw_gpio_free(ah, btcoex_hw->wlanactive_gpio);
+}
+EXPORT_SYMBOL(ath9k_hw_btcoex_deinit);
+
void ath9k_hw_btcoex_init_mci(struct ath_hw *ah)
{
ah->btcoex_hw.mci.ready = false;
@@ -201,8 +259,9 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
/* Configure the desired GPIO port for TX_FRAME output */
- ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
- AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
+ ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio,
+ "ath9k-wlanactive",
+ AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
}
/*
@@ -247,13 +306,13 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
txprio_shift[i-1]);
}
}
+
/* Last WLAN weight has to be adjusted wrt tx priority */
if (concur_tx) {
btcoex_hw->wlan_weight[i-1] &= ~(0xff << txprio_shift[i-1]);
btcoex_hw->wlan_weight[i-1] |= (btcoex_hw->tx_prio[stomp_type]
<< txprio_shift[i-1]);
}
-
}
EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
@@ -268,9 +327,14 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
* Program coex mode and weight registers to
* enable coex 3-wire
*/
+ if (AR_SREV_SOC(ah))
+ REG_CLR_BIT(ah, AR_BT_COEX_MODE2, AR_BT_PHY_ERR_BT_COLL_ENABLE);
+
REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+ if (AR_SREV_SOC(ah))
+ REG_WRITE(ah, AR_BT_COEX_MODE3, btcoex->bt_coex_mode3);
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
@@ -281,8 +345,6 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
} else
REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
-
-
if (AR_SREV_9271(ah)) {
val = REG_READ(ah, 0x50040);
val &= 0xFFFFFEFF;
@@ -292,8 +354,9 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
- ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
- AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
+ ath9k_hw_gpio_request_out(ah, btcoex->wlanactive_gpio,
+ "ath9k-wlanactive",
+ AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
}
static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
@@ -339,7 +402,8 @@ void ath9k_hw_btcoex_enable(struct ath_hw *ah)
break;
}
- if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI) {
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_MCI &&
+ !AR_SREV_SOC(ah)) {
REG_RMW(ah, AR_GPIO_PDPU,
(0x2 << (btcoex_hw->btactive_gpio * 2)),
(0x3 << (btcoex_hw->btactive_gpio * 2)));
@@ -364,8 +428,8 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
if (!AR_SREV_9300_20_OR_LATER(ah))
ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
- ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_gpio_request_out(ah, btcoex_hw->wlanactive_gpio,
+ NULL, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) {
REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index cd2f0a237..1bdfa8465 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -115,6 +115,7 @@ struct ath_btcoex_hw {
u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
+ u32 bt_coex_mode3; /* Register setting for AR_BT_COEX_MODE3 */
u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
u8 tx_prio[ATH_BTCOEX_STOMP_MAX];
@@ -123,6 +124,7 @@ struct ath_btcoex_hw {
void ath9k_hw_btcoex_init_scheme(struct ath_hw *ah);
void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
+void ath9k_hw_btcoex_deinit(struct ath_hw *ah);
void ath9k_hw_btcoex_init_mci(struct ath_hw *ah);
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 37f6d66d1..0f71146b7 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -145,14 +145,14 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
}
static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
- enum ieee80211_band band,
+ enum nl80211_band band,
int16_t *nft)
{
switch (band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5);
break;
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2);
break;
default:
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index 319cb5f25..e56bafcf5 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -107,9 +107,9 @@ void ath_chanctx_init(struct ath_softc *sc)
struct ieee80211_channel *chan;
int i, j;
- sband = &common->sbands[IEEE80211_BAND_2GHZ];
+ sband = &common->sbands[NL80211_BAND_2GHZ];
if (!sband->n_channels)
- sband = &common->sbands[IEEE80211_BAND_5GHZ];
+ sband = &common->sbands[NL80211_BAND_5GHZ];
chan = &sband->channels[0];
for (i = 0; i < ATH9K_NUM_CHANCTX; i++) {
@@ -1333,9 +1333,9 @@ void ath9k_offchannel_init(struct ath_softc *sc)
struct ieee80211_channel *chan;
int i;
- sband = &common->sbands[IEEE80211_BAND_2GHZ];
+ sband = &common->sbands[NL80211_BAND_2GHZ];
if (!sband->n_channels)
- sband = &common->sbands[IEEE80211_BAND_5GHZ];
+ sband = &common->sbands[NL80211_BAND_5GHZ];
chan = &sband->channels[0];
diff --git a/drivers/net/wireless/ath/ath9k/common-init.c b/drivers/net/wireless/ath/ath9k/common-init.c
index a006c1499..8b4f7fdab 100644
--- a/drivers/net/wireless/ath/ath9k/common-init.c
+++ b/drivers/net/wireless/ath/ath9k/common-init.c
@@ -19,14 +19,14 @@
#include "common.h"
#define CHAN2G(_freq, _idx) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 20, \
}
#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 20, \
@@ -139,12 +139,12 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common)
memcpy(channels, ath9k_2ghz_chantable,
sizeof(ath9k_2ghz_chantable));
- common->sbands[IEEE80211_BAND_2GHZ].channels = channels;
- common->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- common->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ common->sbands[NL80211_BAND_2GHZ].channels = channels;
+ common->sbands[NL80211_BAND_2GHZ].band = NL80211_BAND_2GHZ;
+ common->sbands[NL80211_BAND_2GHZ].n_channels =
ARRAY_SIZE(ath9k_2ghz_chantable);
- common->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- common->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ common->sbands[NL80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ common->sbands[NL80211_BAND_2GHZ].n_bitrates =
ARRAY_SIZE(ath9k_legacy_rates);
}
@@ -156,13 +156,13 @@ int ath9k_cmn_init_channels_rates(struct ath_common *common)
memcpy(channels, ath9k_5ghz_chantable,
sizeof(ath9k_5ghz_chantable));
- common->sbands[IEEE80211_BAND_5GHZ].channels = channels;
- common->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- common->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ common->sbands[NL80211_BAND_5GHZ].channels = channels;
+ common->sbands[NL80211_BAND_5GHZ].band = NL80211_BAND_5GHZ;
+ common->sbands[NL80211_BAND_5GHZ].n_channels =
ARRAY_SIZE(ath9k_5ghz_chantable);
- common->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ common->sbands[NL80211_BAND_5GHZ].bitrates =
ath9k_legacy_rates + 4;
- common->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ common->sbands[NL80211_BAND_5GHZ].n_bitrates =
ARRAY_SIZE(ath9k_legacy_rates) - 4;
}
return 0;
@@ -236,9 +236,9 @@ void ath9k_cmn_reload_chainmask(struct ath_hw *ah)
if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
ath9k_cmn_setup_ht_cap(ah,
- &common->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ &common->sbands[NL80211_BAND_2GHZ].ht_cap);
if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
ath9k_cmn_setup_ht_cap(ah,
- &common->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+ &common->sbands[NL80211_BAND_5GHZ].ht_cap);
}
EXPORT_SYMBOL(ath9k_cmn_reload_chainmask);
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index e8c699446..b80e08b13 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -173,7 +173,7 @@ int ath9k_cmn_process_rate(struct ath_common *common,
struct ieee80211_rx_status *rxs)
{
struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
+ enum nl80211_band band;
unsigned int i = 0;
struct ath_hw *ah = common->ah;
@@ -305,7 +305,7 @@ static void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
ichan->channel = chan->center_freq;
ichan->chan = chan;
- if (chan->band == IEEE80211_BAND_5GHZ)
+ if (chan->band == NL80211_BAND_5GHZ)
flags |= CHANNEL_5GHZ;
switch (chandef->width) {
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 6de64cfac..c56e40ff3 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -916,10 +916,21 @@ static int open_file_regdump(struct inode *inode, struct file *file)
struct ath_softc *sc = inode->i_private;
unsigned int len = 0;
u8 *buf;
- int i;
+ int i, j = 0;
unsigned long num_regs, regdump_len, max_reg_offset;
+ const struct reg_hole {
+ u32 start;
+ u32 end;
+ } reg_hole_list[] = {
+ {0x0200, 0x07fc},
+ {0x0c00, 0x0ffc},
+ {0x2000, 0x3ffc},
+ {0x4100, 0x6ffc},
+ {0x705c, 0x7ffc},
+ {0x0000, 0x0000}
+ };
- max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x16bd4 : 0xb500;
+ max_reg_offset = AR_SREV_9300_20_OR_LATER(sc->sc_ah) ? 0x8800 : 0xb500;
num_regs = max_reg_offset / 4 + 1;
regdump_len = num_regs * REGDUMP_LINE_SIZE + 1;
buf = vmalloc(regdump_len);
@@ -927,9 +938,16 @@ static int open_file_regdump(struct inode *inode, struct file *file)
return -ENOMEM;
ath9k_ps_wakeup(sc);
- for (i = 0; i < num_regs; i++)
+ for (i = 0; i < num_regs; i++) {
+ if (reg_hole_list[j].start == i << 2) {
+ i = reg_hole_list[j].end >> 2;
+ j++;
+ continue;
+ }
+
len += scnprintf(buf + len, regdump_len - len,
"0x%06x 0x%08x\n", i << 2, REG_READ(sc->sc_ah, i << 2));
+ }
ath9k_ps_restore(sc);
file->private_data = buf;
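
The regdump loop above now skips known holes in the register space: when the current offset hits a hole's start, the index jumps to the hole's end and the loop increment moves past it. A self-contained version of the same walk (hole table trimmed from the one above):

    #include <stdio.h>
    #include <stdint.h>

    struct reg_hole { uint32_t start, end; };

    static const struct reg_hole holes[] = {
            { 0x0200, 0x07fc },
            { 0x0c00, 0x0ffc },
            { 0x0000, 0x0000 },     /* terminator */
    };

    int main(void)
    {
            uint32_t max_reg_offset = 0x1000;
            size_t j = 0;

            for (uint32_t i = 0; i <= max_reg_offset / 4; i++) {
                    if (holes[j].start == i << 2) {
                            i = holes[j].end >> 2;  /* ++ skips past end */
                            j++;
                            continue;
                    }
                    printf("0x%06x\n", i << 2);     /* REG_READ() here */
            }
            return 0;
    }
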
diff --git a/drivers/net/wireless/ath/ath9k/debug_sta.c b/drivers/net/wireless/ath/ath9k/debug_sta.c
index c2ca57a2e..b66cfa913 100644
--- a/drivers/net/wireless/ath/ath9k/debug_sta.c
+++ b/drivers/net/wireless/ath/ath9k/debug_sta.c
@@ -139,7 +139,7 @@ void ath_debug_rate_stats(struct ath_softc *sc,
}
if (IS_OFDM_RATE(rs->rs_rate)) {
- if (ah->curchan->chan->band == IEEE80211_BAND_2GHZ)
+ if (ah->curchan->chan->band == NL80211_BAND_2GHZ)
rstats->ofdm_stats[rxs->rate_idx - 4].ofdm_cnt++;
else
rstats->ofdm_stats[rxs->rate_idx].ofdm_cnt++;
@@ -173,7 +173,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
struct ath_hw *ah = sc->sc_ah;
struct ath_rx_rate_stats *rstats;
struct ieee80211_sta *sta = an->sta;
- enum ieee80211_band band;
+ enum nl80211_band band;
u32 len = 0, size = 4096;
char *buf;
size_t retval;
@@ -206,7 +206,7 @@ static ssize_t read_file_node_recv(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, size - len, "\n");
legacy:
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
PRINT_CCK_RATE("CCK-1M/LP", 0, false);
PRINT_CCK_RATE("CCK-2M/LP", 1, false);
PRINT_CCK_RATE("CCK-5.5M/LP", 2, false);
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
index 22b3cc4c2..d2ff0fc04 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.c
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -212,7 +212,7 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
struct ieee80211_tx_rate *rates = info->status.rates;
rate = &common->sbands[info->band].bitrates[rates[ridx].idx];
- if (info->band == IEEE80211_BAND_2GHZ &&
+ if (info->band == NL80211_BAND_2GHZ &&
!(rate->flags & IEEE80211_RATE_ERP_G))
phy = WLAN_RC_PHY_CCK;
else
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 284706798..490f74d9d 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -21,6 +21,33 @@
/********************************/
#ifdef CONFIG_MAC80211_LEDS
+
+void ath_fill_led_pin(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ /* Set default led pin if invalid */
+ if (ah->led_pin < 0) {
+ if (AR_SREV_9287(ah))
+ ah->led_pin = ATH_LED_PIN_9287;
+ else if (AR_SREV_9485(ah))
+ ah->led_pin = ATH_LED_PIN_9485;
+ else if (AR_SREV_9300(ah))
+ ah->led_pin = ATH_LED_PIN_9300;
+ else if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ ah->led_pin = ATH_LED_PIN_9462;
+ else
+ ah->led_pin = ATH_LED_PIN_DEF;
+ }
+
+ /* Configure gpio for output */
+ ath9k_hw_gpio_request_out(ah, ah->led_pin, "ath9k-led",
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+ /* LED off, active low */
+ ath9k_hw_set_gpio(ah, ah->led_pin, ah->config.led_active_high ? 0 : 1);
+}
+
static void ath_led_brightness(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
@@ -40,6 +67,8 @@ void ath_deinit_leds(struct ath_softc *sc)
ath_led_brightness(&sc->led_cdev, LED_OFF);
led_classdev_unregister(&sc->led_cdev);
+
+ ath9k_hw_gpio_free(sc->sc_ah, sc->sc_ah->led_pin);
}
void ath_init_leds(struct ath_softc *sc)
@@ -49,6 +78,8 @@ void ath_init_leds(struct ath_softc *sc)
if (AR_SREV_9100(sc->sc_ah))
return;
+ ath_fill_led_pin(sc);
+
if (!ath9k_led_blink)
sc->led_cdev.default_trigger =
ieee80211_get_radio_led_name(sc->hw);
@@ -64,37 +95,6 @@ void ath_init_leds(struct ath_softc *sc)
sc->led_registered = true;
}
-
-void ath_fill_led_pin(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (AR_SREV_9100(ah))
- return;
-
- if (ah->led_pin >= 0) {
- if (!((1 << ah->led_pin) & AR_GPIO_OE_OUT_MASK))
- ath9k_hw_request_gpio(ah, ah->led_pin, "ath9k-led");
- return;
- }
-
- if (AR_SREV_9287(ah))
- ah->led_pin = ATH_LED_PIN_9287;
- else if (AR_SREV_9485(sc->sc_ah))
- ah->led_pin = ATH_LED_PIN_9485;
- else if (AR_SREV_9300(sc->sc_ah))
- ah->led_pin = ATH_LED_PIN_9300;
- else if (AR_SREV_9462(sc->sc_ah) || AR_SREV_9565(sc->sc_ah))
- ah->led_pin = ATH_LED_PIN_9462;
- else
- ah->led_pin = ATH_LED_PIN_DEF;
-
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(ah, ah->led_pin, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
-
- /* LED off, active low */
- ath9k_hw_set_gpio(ah, ah->led_pin, (ah->config.led_active_high) ? 0 : 1);
-}
#endif
/*******************/
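
ath_fill_led_pin() moves next to its only caller and gains the labelled request; the "led_pin < 0" convention means "unset, pick the per-chip default". A one-function sketch of that fallback (pin numbers hypothetical):

    #include <assert.h>

    /* Negative means unconfigured; fall back to the chip default. */
    static int led_pin_or_default(int configured, int chip_default)
    {
            return configured >= 0 ? configured : chip_default;
    }

    int main(void)
    {
            assert(led_pin_or_default(-1, 10) == 10);
            assert(led_pin_or_default(7, 10) == 7);
            return 0;
    }
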
@@ -402,6 +402,13 @@ void ath9k_deinit_btcoex(struct ath_softc *sc)
if (ath9k_hw_mci_is_enabled(ah))
ath_mci_cleanup(sc);
+ else {
+ enum ath_btcoex_scheme scheme = ath9k_hw_get_btcoex_scheme(ah);
+
+ if (scheme == ATH_BTCOEX_CFG_2WIRE ||
+ scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath9k_hw_btcoex_deinit(sc->sc_ah);
+ }
}
int ath9k_init_btcoex(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 8cbf4904d..e1c338cb9 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -527,7 +527,7 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
struct sk_buff *skb)
{
struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
- int index = 0, i = 0, len = skb->len;
+ int index = 0, i, len = skb->len;
int rx_remain_len, rx_pkt_len;
u16 pool_index = 0;
u8 *ptr;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 2aabcbdab..ecb848b60 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -253,17 +253,19 @@ void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
ath9k_led_brightness(&priv->led_cdev, LED_OFF);
led_classdev_unregister(&priv->led_cdev);
cancel_work_sync(&priv->led_work);
+
+ ath9k_hw_gpio_free(priv->ah, priv->ah->led_pin);
}
void ath9k_configure_leds(struct ath9k_htc_priv *priv)
{
/* Configure gpio 1 for output */
- ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_gpio_request_out(priv->ah, priv->ah->led_pin,
+ "ath9k-led",
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
/* LED off, active low */
ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
-
}
void ath9k_init_leds(struct ath9k_htc_priv *priv)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 8647ab77c..c148c6c50 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -262,11 +262,11 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
__be32 tmpval[8];
int i, ret;
- for (i = 0; i < count; i++) {
- tmpaddr[i] = cpu_to_be32(addr[i]);
- }
+ for (i = 0; i < count; i++) {
+ tmpaddr[i] = cpu_to_be32(addr[i]);
+ }
- ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
+ ret = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
(u8 *)tmpaddr , sizeof(u32) * count,
(u8 *)tmpval, sizeof(u32) * count,
100);
@@ -275,9 +275,9 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
"Multiple REGISTER READ FAILED (count: %d)\n", count);
}
- for (i = 0; i < count; i++) {
- val[i] = be32_to_cpu(tmpval[i]);
- }
+ for (i = 0; i < count; i++) {
+ val[i] = be32_to_cpu(tmpval[i]);
+ }
}
static void ath9k_regwrite_multi(struct ath_common *common)
@@ -765,11 +765,11 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
sizeof(struct htc_frame_hdr) + 4;
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &common->sbands[IEEE80211_BAND_2GHZ];
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &common->sbands[NL80211_BAND_2GHZ];
if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &common->sbands[IEEE80211_BAND_5GHZ];
+ hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &common->sbands[NL80211_BAND_5GHZ];
ath9k_cmn_reload_chainmask(ah);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 639294a9e..a553c91d4 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -246,7 +246,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
struct ieee80211_conf *conf = &common->hw->conf;
bool fastcc;
struct ieee80211_channel *channel = hw->conf.chandef.chan;
- struct ath9k_hw_cal_data *caldata = NULL;
+ struct ath9k_hw_cal_data *caldata;
enum htc_phymode mode;
__be16 htc_mode;
u8 cmd_rsp;
@@ -274,10 +274,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
priv->ah->curchan->channel,
channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
fastcc);
-
- if (!fastcc)
- caldata = &priv->caldata;
-
+ caldata = fastcc ? NULL : &priv->caldata;
ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
if (ret) {
ath_err(common,
@@ -1770,8 +1767,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
memset(&tmask, 0, sizeof(struct ath9k_htc_target_rate_mask));
tmask.vif_index = avp->index;
- tmask.band = IEEE80211_BAND_2GHZ;
- tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_2GHZ].legacy);
+ tmask.band = NL80211_BAND_2GHZ;
+ tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_2GHZ].legacy);
WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
if (ret) {
@@ -1781,8 +1778,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
goto out;
}
- tmask.band = IEEE80211_BAND_5GHZ;
- tmask.mask = cpu_to_be32(mask->control[IEEE80211_BAND_5GHZ].legacy);
+ tmask.band = NL80211_BAND_5GHZ;
+ tmask.mask = cpu_to_be32(mask->control[NL80211_BAND_5GHZ].legacy);
WMI_CMD_BUF(WMI_BITRATE_MASK_CMDID, &tmask);
if (ret) {
@@ -1793,8 +1790,8 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
}
ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n",
- mask->control[IEEE80211_BAND_2GHZ].legacy,
- mask->control[IEEE80211_BAND_5GHZ].legacy);
+ mask->control[NL80211_BAND_2GHZ].legacy,
+ mask->control[NL80211_BAND_5GHZ].legacy);
out:
return ret;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index cc9648f84..f333ef1e3 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -494,7 +494,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
if (txs->ts_flags & ATH9K_HTC_TXSTAT_SGI)
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
} else {
- if (cur_conf->chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (cur_conf->chandef.chan->band == NL80211_BAND_5GHZ)
rate->idx += 4; /* No CCK rates */
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index e7a31016f..8b2895f9a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1582,8 +1582,10 @@ static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
if (!(gpio_mask & 1))
continue;
- ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_gpio_request_out(ah, i, NULL,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
+ ath9k_hw_gpio_free(ah, i);
}
}
@@ -1958,7 +1960,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_init_qos(ah);
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
+ ath9k_hw_gpio_request_in(ah, ah->rfkill_gpio, "ath9k-rfkill");
ath9k_hw_init_global_settings(ah);
@@ -2385,6 +2387,61 @@ static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
}
}
+static void ath9k_gpio_cap_init(struct ath_hw *ah)
+{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+
+ if (AR_SREV_9271(ah)) {
+ pCap->num_gpio_pins = AR9271_NUM_GPIO;
+ pCap->gpio_mask = AR9271_GPIO_MASK;
+ } else if (AR_DEVID_7010(ah)) {
+ pCap->num_gpio_pins = AR7010_NUM_GPIO;
+ pCap->gpio_mask = AR7010_GPIO_MASK;
+ } else if (AR_SREV_9287(ah)) {
+ pCap->num_gpio_pins = AR9287_NUM_GPIO;
+ pCap->gpio_mask = AR9287_GPIO_MASK;
+ } else if (AR_SREV_9285(ah)) {
+ pCap->num_gpio_pins = AR9285_NUM_GPIO;
+ pCap->gpio_mask = AR9285_GPIO_MASK;
+ } else if (AR_SREV_9280(ah)) {
+ pCap->num_gpio_pins = AR9280_NUM_GPIO;
+ pCap->gpio_mask = AR9280_GPIO_MASK;
+ } else if (AR_SREV_9300(ah)) {
+ pCap->num_gpio_pins = AR9300_NUM_GPIO;
+ pCap->gpio_mask = AR9300_GPIO_MASK;
+ } else if (AR_SREV_9330(ah)) {
+ pCap->num_gpio_pins = AR9330_NUM_GPIO;
+ pCap->gpio_mask = AR9330_GPIO_MASK;
+ } else if (AR_SREV_9340(ah)) {
+ pCap->num_gpio_pins = AR9340_NUM_GPIO;
+ pCap->gpio_mask = AR9340_GPIO_MASK;
+ } else if (AR_SREV_9462(ah)) {
+ pCap->num_gpio_pins = AR9462_NUM_GPIO;
+ pCap->gpio_mask = AR9462_GPIO_MASK;
+ } else if (AR_SREV_9485(ah)) {
+ pCap->num_gpio_pins = AR9485_NUM_GPIO;
+ pCap->gpio_mask = AR9485_GPIO_MASK;
+ } else if (AR_SREV_9531(ah)) {
+ pCap->num_gpio_pins = AR9531_NUM_GPIO;
+ pCap->gpio_mask = AR9531_GPIO_MASK;
+ } else if (AR_SREV_9550(ah)) {
+ pCap->num_gpio_pins = AR9550_NUM_GPIO;
+ pCap->gpio_mask = AR9550_GPIO_MASK;
+ } else if (AR_SREV_9561(ah)) {
+ pCap->num_gpio_pins = AR9561_NUM_GPIO;
+ pCap->gpio_mask = AR9561_GPIO_MASK;
+ } else if (AR_SREV_9565(ah)) {
+ pCap->num_gpio_pins = AR9565_NUM_GPIO;
+ pCap->gpio_mask = AR9565_GPIO_MASK;
+ } else if (AR_SREV_9580(ah)) {
+ pCap->num_gpio_pins = AR9580_NUM_GPIO;
+ pCap->gpio_mask = AR9580_GPIO_MASK;
+ } else {
+ pCap->num_gpio_pins = AR_NUM_GPIO;
+ pCap->gpio_mask = AR_GPIO_MASK;
+ }
+}
+
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2478,20 +2535,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
- if (AR_SREV_9271(ah))
- pCap->num_gpio_pins = AR9271_NUM_GPIO;
- else if (AR_DEVID_7010(ah))
- pCap->num_gpio_pins = AR7010_NUM_GPIO;
- else if (AR_SREV_9300_20_OR_LATER(ah))
- pCap->num_gpio_pins = AR9300_NUM_GPIO;
- else if (AR_SREV_9287_11_OR_LATER(ah))
- pCap->num_gpio_pins = AR9287_NUM_GPIO;
- else if (AR_SREV_9285_12_OR_LATER(ah))
- pCap->num_gpio_pins = AR9285_NUM_GPIO;
- else if (AR_SREV_9280_20_OR_LATER(ah))
- pCap->num_gpio_pins = AR928X_NUM_GPIO;
- else
- pCap->num_gpio_pins = AR_NUM_GPIO;
+ ath9k_gpio_cap_init(ah);
if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
@@ -2612,8 +2656,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
/* GPIO / RFKILL / Antennae */
/****************************/
-static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
- u32 gpio, u32 type)
+static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah, u32 gpio, u32 type)
{
int addr;
u32 gpio_shift, tmp;
@@ -2627,8 +2670,8 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
gpio_shift = (gpio % 6) * 5;
- if (AR_SREV_9280_20_OR_LATER(ah)
- || (addr != AR_GPIO_OUTPUT_MUX1)) {
+ if (AR_SREV_9280_20_OR_LATER(ah) ||
+ (addr != AR_GPIO_OUTPUT_MUX1)) {
REG_RMW(ah, addr, (type << gpio_shift),
(0x1f << gpio_shift));
} else {
@@ -2640,106 +2683,144 @@ static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
}
}
-void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
+/* The BSP should set the corresponding MUX register correctly. */
+static void ath9k_hw_gpio_cfg_soc(struct ath_hw *ah, u32 gpio, bool out,
+ const char *label)
{
- u32 gpio_shift;
+ if (ah->caps.gpio_requested & BIT(gpio))
+ return;
- BUG_ON(gpio >= ah->caps.num_gpio_pins);
+ /* may be requested by BSP, free anyway */
+ gpio_free(gpio);
- if (AR_DEVID_7010(ah)) {
- gpio_shift = gpio;
- REG_RMW(ah, AR7010_GPIO_OE,
- (AR7010_GPIO_OE_AS_INPUT << gpio_shift),
- (AR7010_GPIO_OE_MASK << gpio_shift));
+ if (gpio_request_one(gpio, out ? GPIOF_OUT_INIT_LOW : GPIOF_IN, label))
return;
- }
- gpio_shift = gpio << 1;
- REG_RMW(ah,
- AR_GPIO_OE_OUT,
- (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
- (AR_GPIO_OE_OUT_DRV << gpio_shift));
+ ah->caps.gpio_requested |= BIT(gpio);
}
-EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
-u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
+static void ath9k_hw_gpio_cfg_wmac(struct ath_hw *ah, u32 gpio, bool out,
+ u32 ah_signal_type)
{
-#define MS_REG_READ(x, y) \
- (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))
-
- if (gpio >= ah->caps.num_gpio_pins)
- return 0xffffffff;
+ u32 gpio_set, gpio_shift = gpio;
if (AR_DEVID_7010(ah)) {
- u32 val;
- val = REG_READ(ah, AR7010_GPIO_IN);
- return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
- } else if (AR_SREV_9300_20_OR_LATER(ah))
- return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
- AR_GPIO_BIT(gpio)) != 0;
- else if (AR_SREV_9271(ah))
- return MS_REG_READ(AR9271, gpio) != 0;
- else if (AR_SREV_9287_11_OR_LATER(ah))
- return MS_REG_READ(AR9287, gpio) != 0;
- else if (AR_SREV_9285_12_OR_LATER(ah))
- return MS_REG_READ(AR9285, gpio) != 0;
- else if (AR_SREV_9280_20_OR_LATER(ah))
- return MS_REG_READ(AR928X, gpio) != 0;
- else
- return MS_REG_READ(AR, gpio) != 0;
+ gpio_set = out ?
+ AR7010_GPIO_OE_AS_OUTPUT : AR7010_GPIO_OE_AS_INPUT;
+ REG_RMW(ah, AR7010_GPIO_OE, gpio_set << gpio_shift,
+ AR7010_GPIO_OE_MASK << gpio_shift);
+ } else if (AR_SREV_SOC(ah)) {
+ gpio_set = out ? 1 : 0;
+ REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+ gpio_set << gpio_shift);
+ } else {
+ gpio_shift = gpio << 1;
+ gpio_set = out ?
+ AR_GPIO_OE_OUT_DRV_ALL : AR_GPIO_OE_OUT_DRV_NO;
+ REG_RMW(ah, AR_GPIO_OE_OUT, gpio_set << gpio_shift,
+ AR_GPIO_OE_OUT_DRV << gpio_shift);
+
+ if (out)
+ ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
+ }
}
-EXPORT_SYMBOL(ath9k_hw_gpio_get);
-void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
- u32 ah_signal_type)
+static void ath9k_hw_gpio_request(struct ath_hw *ah, u32 gpio, bool out,
+ const char *label, u32 ah_signal_type)
{
- u32 gpio_shift;
+ WARN_ON(gpio >= ah->caps.num_gpio_pins);
- if (AR_DEVID_7010(ah)) {
- gpio_shift = gpio;
- REG_RMW(ah, AR7010_GPIO_OE,
- (AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
- (AR7010_GPIO_OE_MASK << gpio_shift));
- return;
- }
+ if (BIT(gpio) & ah->caps.gpio_mask)
+ ath9k_hw_gpio_cfg_wmac(ah, gpio, out, ah_signal_type);
+ else if (AR_SREV_SOC(ah))
+ ath9k_hw_gpio_cfg_soc(ah, gpio, out, label);
+ else
+ WARN_ON(1);
+}
- ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
- gpio_shift = 2 * gpio;
- REG_RMW(ah,
- AR_GPIO_OE_OUT,
- (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
- (AR_GPIO_OE_OUT_DRV << gpio_shift));
+void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label)
+{
+ ath9k_hw_gpio_request(ah, gpio, false, label, 0);
}
-EXPORT_SYMBOL(ath9k_hw_cfg_output);
+EXPORT_SYMBOL(ath9k_hw_gpio_request_in);
-void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
+void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label,
+ u32 ah_signal_type)
{
- if (AR_DEVID_7010(ah)) {
- val = val ? 0 : 1;
- REG_RMW(ah, AR7010_GPIO_OUT, ((val&1) << gpio),
- AR_GPIO_BIT(gpio));
+ ath9k_hw_gpio_request(ah, gpio, true, label, ah_signal_type);
+}
+EXPORT_SYMBOL(ath9k_hw_gpio_request_out);
+
+void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio)
+{
+ if (!AR_SREV_SOC(ah))
return;
+
+ WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+ if (ah->caps.gpio_requested & BIT(gpio)) {
+ gpio_free(gpio);
+ ah->caps.gpio_requested &= ~BIT(gpio);
}
+}
+EXPORT_SYMBOL(ath9k_hw_gpio_free);
- if (AR_SREV_9271(ah))
- val = ~val;
+u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
+{
+ u32 val = 0xffffffff;
- if ((1 << gpio) & AR_GPIO_OE_OUT_MASK)
- REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
- AR_GPIO_BIT(gpio));
- else
- gpio_set_value(gpio, val & 1);
+#define MS_REG_READ(x, y) \
+ (MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & BIT(y))
+
+ WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+ if (BIT(gpio) & ah->caps.gpio_mask) {
+ if (AR_SREV_9271(ah))
+ val = MS_REG_READ(AR9271, gpio);
+ else if (AR_SREV_9287(ah))
+ val = MS_REG_READ(AR9287, gpio);
+ else if (AR_SREV_9285(ah))
+ val = MS_REG_READ(AR9285, gpio);
+ else if (AR_SREV_9280(ah))
+ val = MS_REG_READ(AR928X, gpio);
+ else if (AR_DEVID_7010(ah))
+ val = REG_READ(ah, AR7010_GPIO_IN) & BIT(gpio);
+ else if (AR_SREV_9300_20_OR_LATER(ah))
+ val = REG_READ(ah, AR_GPIO_IN) & BIT(gpio);
+ else
+ val = MS_REG_READ(AR, gpio);
+ } else if (BIT(gpio) & ah->caps.gpio_requested) {
+ val = gpio_get_value(gpio) & BIT(gpio);
+ } else {
+ WARN_ON(1);
+ }
+
+ return val;
}
-EXPORT_SYMBOL(ath9k_hw_set_gpio);
+EXPORT_SYMBOL(ath9k_hw_gpio_get);
-void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label)
+void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
{
- if (gpio >= ah->caps.num_gpio_pins)
- return;
+ WARN_ON(gpio >= ah->caps.num_gpio_pins);
+
+ if (AR_DEVID_7010(ah) || AR_SREV_9271(ah))
+ val = !val;
+ else
+ val = !!val;
+
+ if (BIT(gpio) & ah->caps.gpio_mask) {
+ u32 out_addr = AR_DEVID_7010(ah) ?
+ AR7010_GPIO_OUT : AR_GPIO_IN_OUT;
- gpio_request_one(gpio, GPIOF_DIR_OUT | GPIOF_INIT_LOW, label);
+ REG_RMW(ah, out_addr, val << gpio, BIT(gpio));
+ } else if (BIT(gpio) & ah->caps.gpio_requested) {
+ gpio_set_value(gpio, val);
+ } else {
+ WARN_ON(1);
+ }
}
-EXPORT_SYMBOL(ath9k_hw_request_gpio);
+EXPORT_SYMBOL(ath9k_hw_set_gpio);
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
{
@@ -2833,8 +2914,7 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
{
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ieee80211_channel *channel;
- int chan_pwr, new_pwr, max_gain;
- int ant_gain, ant_reduction = 0;
+ int chan_pwr, new_pwr;
if (!chan)
return;
@@ -2842,15 +2922,10 @@ void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
channel = chan->chan;
chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
new_pwr = min_t(int, chan_pwr, reg->power_limit);
- max_gain = chan_pwr - new_pwr + channel->max_antenna_gain * 2;
-
- ant_gain = get_antenna_gain(ah, chan);
- if (ant_gain > max_gain)
- ant_reduction = ant_gain - max_gain;
ah->eep_ops->set_txpower(ah, chan,
ath9k_regd_get_ctl(reg, chan),
- ant_reduction, new_pwr, test);
+ get_antenna_gain(ah, chan), new_pwr, test);
}
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
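
The hunks above replace the old cfg_output/cfg_gpio_input calls with an explicit request/free pairing. A sketch of the intended call pattern, using a hypothetical pin and label (not from the patch):

/* Hypothetical pin and label, for illustration only. On SOC chips
 * the final free hands the line back to gpiolib; on older chips
 * ath9k_hw_gpio_free() is a no-op, so the pairing is safe everywhere.
 */
static void example_pulse_pin(struct ath_hw *ah, u32 pin)
{
	ath9k_hw_gpio_request_out(ah, pin, "ath9k-example",
				  AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
	ath9k_hw_set_gpio(ah, pin, 1);
	ath9k_hw_set_gpio(ah, pin, 0);
	ath9k_hw_gpio_free(ah, pin);
}
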
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index 831a54415..9cbca1229 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -160,7 +160,6 @@
#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e
#define AR_GPIOD_MASK 0x00001FFF
-#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
#define BASE_ACTIVATE_DELAY 100
#define RTC_PLL_SETTLE_DELAY (AR_SREV_9340(ah) ? 1000 : 100)
@@ -301,6 +300,8 @@ struct ath9k_hw_capabilities {
u8 max_txchains;
u8 max_rxchains;
u8 num_gpio_pins;
+ u32 gpio_mask;
+ u32 gpio_requested;
u8 rx_hp_qdepth;
u8 rx_lp_qdepth;
u8 rx_status_len;
@@ -1019,12 +1020,12 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah);
u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan);
/* GPIO / RFKILL / Antennae */
-void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio);
+void ath9k_hw_gpio_request_in(struct ath_hw *ah, u32 gpio, const char *label);
+void ath9k_hw_gpio_request_out(struct ath_hw *ah, u32 gpio, const char *label,
+ u32 ah_signal_type);
+void ath9k_hw_gpio_free(struct ath_hw *ah, u32 gpio);
u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio);
-void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
- u32 ah_signal_type);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val);
-void ath9k_hw_request_gpio(struct ath_hw *ah, u32 gpio, const char *label);
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna);
/* General Operation */
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index deded2a9a..2ee862475 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -481,7 +481,7 @@ static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
static int ath9k_eeprom_request(struct ath_softc *sc, const char *name)
{
struct ath9k_eeprom_ctx ec;
- struct ath_hw *ah = ah = sc->sc_ah;
+ struct ath_hw *ah = sc->sc_ah;
int err;
/* try to load the EEPROM content asynchronously */
@@ -667,7 +667,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ath9k_cmn_init_crypto(sc->sc_ah);
ath9k_init_misc(sc);
- ath_fill_led_pin(sc);
ath_chanctx_init(sc);
ath9k_offchannel_init(sc);
@@ -713,9 +712,9 @@ static void ath9k_init_txpower_limits(struct ath_softc *sc)
struct ath9k_channel *curchan = ah->curchan;
if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
+ ath9k_init_band_txpower(sc, NL80211_BAND_2GHZ);
if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
+ ath9k_init_band_txpower(sc, NL80211_BAND_5GHZ);
ah->curchan = curchan;
}
@@ -887,11 +886,11 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
sc->ant_tx = hw->wiphy->available_antennas_tx;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &common->sbands[IEEE80211_BAND_2GHZ];
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &common->sbands[NL80211_BAND_2GHZ];
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &common->sbands[IEEE80211_BAND_5GHZ];
+ hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &common->sbands[NL80211_BAND_5GHZ];
#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
ath9k_set_mcc_capab(sc, hw);
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 3aed43a63..8b6398850 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -718,12 +718,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (!ath_complete_reset(sc, false))
ah->reset_power_on = false;
- if (ah->led_pin >= 0) {
- ath9k_hw_cfg_output(ah, ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ if (ah->led_pin >= 0)
ath9k_hw_set_gpio(ah, ah->led_pin,
(ah->config.led_active_high) ? 1 : 0);
- }
/*
* Reset key cache to sane defaults (all entries cleared) instead of
@@ -867,11 +864,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
spin_lock_bh(&sc->sc_pcu_lock);
- if (ah->led_pin >= 0) {
+ if (ah->led_pin >= 0)
ath9k_hw_set_gpio(ah, ah->led_pin,
(ah->config.led_active_high) ? 0 : 1);
- ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
- }
ath_prepare_reset(sc);
@@ -1938,14 +1933,14 @@ static int ath9k_get_survey(struct ieee80211_hw *hw, int idx,
if (idx == 0)
ath_update_survey_stats(sc);
- sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
sband = NULL;
}
if (!sband)
- sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels) {
spin_unlock_bh(&common->cc_lock);
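
ath9k_get_survey() flattens both bands into a single index space, 2 GHz channels first. The index arithmetic, restated as a hypothetical standalone helper:

/* Hypothetical helper mirroring ath9k_get_survey(): consume the
 * 2 GHz band first, then rebase the index into 5 GHz.
 */
static struct ieee80211_channel *
survey_idx_to_channel(struct wiphy *wiphy, int idx)
{
	struct ieee80211_supported_band *sband;

	sband = wiphy->bands[NL80211_BAND_2GHZ];
	if (sband && idx >= sband->n_channels) {
		idx -= sband->n_channels;
		sband = NULL;
	}
	if (!sband)
		sband = wiphy->bands[NL80211_BAND_5GHZ];
	if (!sband || idx >= sband->n_channels)
		return NULL;

	return &sband->channels[idx];
}
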
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index c8d35feba..80ff69f99 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -985,6 +985,10 @@
#define AR_SREV_9561(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9561))
+#define AR_SREV_SOC(_ah) \
+ (AR_SREV_9340(_ah) || AR_SREV_9531(_ah) || AR_SREV_9550(_ah) || \
+ AR_SREV_9561(_ah))
+
/* NOTE: When adding chips newer than Peacock, add chip check here */
#define AR_SREV_9580_10_OR_LATER(_ah) \
(AR_SREV_9580(_ah))
@@ -1104,14 +1108,46 @@ enum {
#define AR_PCIE_PHY_REG3 0x18c08
+/* Define the correct GPIO numbers and mask bits to indicate the WMAC
+ * GPIO resource.
+ * The SOC chips (AR9340, AR9531, AR9550, AR9561) rely on the gpiolib
+ * framework and may access all GPIOs, whereas the SOC AR9330 is
+ * restricted to the WMAC GPIOs, which share the design of the older
+ * chips.
+ */
#define AR_NUM_GPIO 14
-#define AR928X_NUM_GPIO 10
+#define AR9280_NUM_GPIO 10
#define AR9285_NUM_GPIO 12
-#define AR9287_NUM_GPIO 11
+#define AR9287_NUM_GPIO 10
#define AR9271_NUM_GPIO 16
-#define AR9300_NUM_GPIO 17
+#define AR9300_NUM_GPIO 16
+#define AR9330_NUM_GPIO 16
+#define AR9340_NUM_GPIO 23
+#define AR9462_NUM_GPIO 14
+#define AR9485_NUM_GPIO 12
+#define AR9531_NUM_GPIO 18
+#define AR9550_NUM_GPIO 24
+#define AR9561_NUM_GPIO 23
+#define AR9565_NUM_GPIO 14
+#define AR9580_NUM_GPIO 16
#define AR7010_NUM_GPIO 16
+#define AR_GPIO_MASK 0x00003FFF
+#define AR9271_GPIO_MASK 0x0000FFFF
+#define AR9280_GPIO_MASK 0x000003FF
+#define AR9285_GPIO_MASK 0x00000FFF
+#define AR9287_GPIO_MASK 0x000003FF
+#define AR9300_GPIO_MASK 0x0000F4FF
+#define AR9330_GPIO_MASK 0x0000F4FF
+#define AR9340_GPIO_MASK 0x0000000F
+#define AR9462_GPIO_MASK 0x00003FFF
+#define AR9485_GPIO_MASK 0x00000FFF
+#define AR9531_GPIO_MASK 0x0000000F
+#define AR9550_GPIO_MASK 0x0000000F
+#define AR9561_GPIO_MASK 0x0000000F
+#define AR9565_GPIO_MASK 0x00003FFF
+#define AR9580_GPIO_MASK 0x0000F4FF
+#define AR7010_GPIO_MASK 0x0000FFFF
+
#define AR_GPIO_IN_OUT (AR_SREV_9340(ah) ? 0x4028 : 0x4048)
#define AR_GPIO_IN_VAL 0x0FFFC000
#define AR_GPIO_IN_VAL_S 14
@@ -1132,8 +1168,6 @@ enum {
#define AR_GPIO_OE_OUT (AR_SREV_9340(ah) ? 0x4030 : \
(AR_SREV_9300_20_OR_LATER(ah) ? 0x4050 : 0x404c))
-#define AR_GPIO_OE_OUT_MASK (AR_SREV_9550_OR_LATER(ah) ? \
- 0x0000000F : 0xFFFFFFFF)
#define AR_GPIO_OE_OUT_DRV 0x3
#define AR_GPIO_OE_OUT_DRV_NO 0x0
#define AR_GPIO_OE_OUT_DRV_LOW 0x1
@@ -1858,15 +1892,33 @@ enum {
#define AR9300_BT_WGHT 0xcccc4444
-#define AR_BT_COEX_MODE2 0x817c
-#define AR_BT_BCN_MISS_THRESH 0x000000ff
-#define AR_BT_BCN_MISS_THRESH_S 0
-#define AR_BT_BCN_MISS_CNT 0x0000ff00
-#define AR_BT_BCN_MISS_CNT_S 8
-#define AR_BT_HOLD_RX_CLEAR 0x00010000
-#define AR_BT_HOLD_RX_CLEAR_S 16
-#define AR_BT_DISABLE_BT_ANT 0x00100000
-#define AR_BT_DISABLE_BT_ANT_S 20
+#define AR_BT_COEX_MODE2 0x817c
+#define AR_BT_BCN_MISS_THRESH 0x000000ff
+#define AR_BT_BCN_MISS_THRESH_S 0
+#define AR_BT_BCN_MISS_CNT 0x0000ff00
+#define AR_BT_BCN_MISS_CNT_S 8
+#define AR_BT_HOLD_RX_CLEAR 0x00010000
+#define AR_BT_HOLD_RX_CLEAR_S 16
+#define AR_BT_PROTECT_BT_AFTER_WAKEUP 0x00080000
+#define AR_BT_PROTECT_BT_AFTER_WAKEUP_S 19
+#define AR_BT_DISABLE_BT_ANT 0x00100000
+#define AR_BT_DISABLE_BT_ANT_S 20
+#define AR_BT_QUIET_2_WIRE 0x00200000
+#define AR_BT_QUIET_2_WIRE_S 21
+#define AR_BT_WL_ACTIVE_MODE 0x00c00000
+#define AR_BT_WL_ACTIVE_MODE_S 22
+#define AR_BT_WL_TXRX_SEPARATE 0x01000000
+#define AR_BT_WL_TXRX_SEPARATE_S 24
+#define AR_BT_RS_DISCARD_EXTEND 0x02000000
+#define AR_BT_RS_DISCARD_EXTEND_S 25
+#define AR_BT_TSF_BT_ACTIVE_CTRL 0x0c000000
+#define AR_BT_TSF_BT_ACTIVE_CTRL_S 26
+#define AR_BT_TSF_BT_PRIORITY_CTRL 0x30000000
+#define AR_BT_TSF_BT_PRIORITY_CTRL_S 28
+#define AR_BT_INTERRUPT_ENABLE 0x40000000
+#define AR_BT_INTERRUPT_ENABLE_S 30
+#define AR_BT_PHY_ERR_BT_COLL_ENABLE 0x80000000
+#define AR_BT_PHY_ERR_BT_COLL_ENABLE_S 31
#define AR_TXSIFS 0x81d0
#define AR_TXSIFS_TIME 0x000000FF
@@ -1875,6 +1927,16 @@ enum {
#define AR_TXSIFS_ACK_SHIFT 0x00007000
#define AR_TXSIFS_ACK_SHIFT_S 12
+#define AR_BT_COEX_MODE3 0x81d4
+#define AR_BT_WL_ACTIVE_TIME 0x000000ff
+#define AR_BT_WL_ACTIVE_TIME_S 0
+#define AR_BT_WL_QC_TIME 0x0000ff00
+#define AR_BT_WL_QC_TIME_S 8
+#define AR_BT_ALLOW_CONCURRENT_ACCESS 0x000f0000
+#define AR_BT_ALLOW_CONCURRENT_ACCESS_S 16
+#define AR_BT_AGC_SATURATION_CNT_ENABLE 0x00100000
+#define AR_BT_AGC_SATURATION_CNT_ENABLE_S 20
+
#define AR_TXOP_X 0x81ec
#define AR_TXOP_X_VAL 0x000000FF
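
These masks partition each chip's pin space: bits set in *_GPIO_MASK are WMAC-register GPIOs, bits clear are reachable only on SOC chips through gpiolib. An illustrative restatement of the dispatch test added in hw.c:

/* Illustrative restatement of the test used by the new
 * ath9k_hw_gpio_request(): e.g. on AR9340 (mask 0x0000000F,
 * 23 pins) pins 0-3 are WMAC GPIOs, pins 4-22 go through gpiolib.
 */
static bool pin_is_wmac_gpio(struct ath_hw *ah, u32 gpio)
{
	return !!(BIT(gpio) & ah->caps.gpio_mask);
}
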
diff --git a/drivers/net/wireless/ath/ath9k/rng.c b/drivers/net/wireless/ath/ath9k/rng.c
index c9cb2aad7..d38e50f96 100644
--- a/drivers/net/wireless/ath/ath9k/rng.c
+++ b/drivers/net/wireless/ath/ath9k/rng.c
@@ -55,11 +55,26 @@ static int ath9k_rng_data_read(struct ath_softc *sc, u32 *buf, u32 buf_size)
return j << 2;
}
+static u32 ath9k_rng_delay_get(u32 fail_stats)
+{
+ u32 delay;
+
+ if (fail_stats < 100)
+ delay = 10;
+ else if (fail_stats < 105)
+ delay = 1000;
+ else
+ delay = 10000;
+
+ return delay;
+}
+
static int ath9k_rng_kthread(void *data)
{
int bytes_read;
struct ath_softc *sc = data;
u32 *rng_buf;
+ u32 delay, fail_stats = 0;
rng_buf = kmalloc_array(ATH9K_RNG_BUF_SIZE, sizeof(u32), GFP_KERNEL);
if (!rng_buf)
@@ -69,10 +84,13 @@ static int ath9k_rng_kthread(void *data)
bytes_read = ath9k_rng_data_read(sc, rng_buf,
ATH9K_RNG_BUF_SIZE);
if (unlikely(!bytes_read)) {
- msleep_interruptible(10);
+ delay = ath9k_rng_delay_get(++fail_stats);
+ msleep_interruptible(delay);
continue;
}
+ fail_stats = 0;
+
/* sleep until entropy bits under write_wakeup_threshold */
add_hwgenerator_randomness((void *)rng_buf, bytes_read,
ATH9K_RNG_ENTROPY(bytes_read));
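
The new back-off keeps the RNG kthread cheap when the ADC repeatedly yields no entropy: the first hundred consecutive failures retry after 10 ms, the next five after 1 s, everything beyond after 10 s. A quick self-check of the schedule (would only compile inside rng.c, since the helper is static):

/* Sanity-check of the thresholds in ath9k_rng_delay_get();
 * delays are in milliseconds.
 */
static void example_rng_backoff_check(void)
{
	WARN_ON(ath9k_rng_delay_get(1) != 10);		/* < 100 failures */
	WARN_ON(ath9k_rng_delay_get(100) != 1000);	/* 100..104 */
	WARN_ON(ath9k_rng_delay_get(105) != 10000);	/* >= 105 */
}
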
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index fe795fc52..8ddd604bd 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1112,7 +1112,7 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
bool is_2ghz;
struct modal_eep_header *pmodal;
- is_2ghz = info->band == IEEE80211_BAND_2GHZ;
+ is_2ghz = info->band == NL80211_BAND_2GHZ;
pmodal = &eep->modalHeader[is_2ghz];
power_ht40delta = pmodal->ht40PowerIncForPdadc;
} else {
@@ -1236,7 +1236,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
/* legacy rates */
rate = &common->sbands[tx_info->band].bitrates[rates[i].idx];
- if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
+ if ((tx_info->band == NL80211_BAND_2GHZ) &&
!(rate->flags & IEEE80211_RATE_ERP_G))
phy = WLAN_RC_PHY_CCK;
else
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index a2f005703..7d4a72dc9 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -48,7 +48,7 @@ int carl9170_set_dyn_sifs_ack(struct ar9170 *ar)
if (conf_is_ht40(&ar->hw->conf))
val = 0x010a;
else {
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+ if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
val = 0x105;
else
val = 0x104;
@@ -66,7 +66,7 @@ int carl9170_set_rts_cts_rate(struct ar9170 *ar)
rts_rate = 0x1da;
cts_rate = 0x10a;
} else {
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
/* 11 mbit CCK */
rts_rate = 033;
cts_rate = 003;
@@ -93,7 +93,7 @@ int carl9170_set_slot_time(struct ar9170 *ar)
return 0;
}
- if ((ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ) ||
+ if ((ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) ||
vif->bss_conf.use_short_slot)
slottime = 9;
@@ -120,7 +120,7 @@ int carl9170_set_mac_rates(struct ar9170 *ar)
basic |= (vif->bss_conf.basic_rates & 0xff0) << 4;
rcu_read_unlock();
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (ar->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
mandatory = 0xff00; /* OFDM 6/9/12/18/24/36/48/54 */
else
mandatory = 0xff0f; /* OFDM (6/9../54) + CCK (1/2/5.5/11) */
@@ -512,10 +512,10 @@ int carl9170_set_mac_tpc(struct ar9170 *ar, struct ieee80211_channel *channel)
chains = AR9170_TX_PHY_TXCHAIN_1;
switch (channel->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
power = ar->power_2G_ofdm[0] & 0x3f;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
power = ar->power_5G_leg[0] & 0x3f;
break;
default:
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4d1527a2e..ffb22a04b 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1666,7 +1666,7 @@ static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
return err;
}
- for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
+ for (b = 0; b < NUM_NL80211_BANDS; b++) {
band = ar->hw->wiphy->bands[b];
if (!band)
@@ -1941,13 +1941,13 @@ static int carl9170_parse_eeprom(struct ar9170 *ar)
}
if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
- ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
&carl9170_band_2GHz;
chans += carl9170_band_2GHz.n_channels;
bands++;
}
if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
- ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
&carl9170_band_5GHz;
chans += carl9170_band_5GHz.n_channels;
bands++;
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index dca6df13f..34d9fd770 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -540,11 +540,11 @@ static int carl9170_init_phy_from_eeprom(struct ar9170 *ar,
return carl9170_regwrite_result();
}
-static int carl9170_init_phy(struct ar9170 *ar, enum ieee80211_band band)
+static int carl9170_init_phy(struct ar9170 *ar, enum nl80211_band band)
{
int i, err;
u32 val;
- bool is_2ghz = band == IEEE80211_BAND_2GHZ;
+ bool is_2ghz = band == NL80211_BAND_2GHZ;
bool is_40mhz = conf_is_ht40(&ar->hw->conf);
carl9170_regwrite_begin(ar);
@@ -1125,13 +1125,13 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar,
u8 f, tmp;
switch (channel->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
f = channel->center_freq - 2300;
cal_freq_pier = ar->eeprom.cal_freq_pier_2G;
i = AR5416_NUM_2G_CAL_PIERS - 1;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
f = (channel->center_freq - 4800) / 5;
cal_freq_pier = ar->eeprom.cal_freq_pier_5G;
i = AR5416_NUM_5G_CAL_PIERS - 1;
@@ -1158,12 +1158,12 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar,
int j;
switch (channel->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
cal_pier_data = &ar->eeprom.
cal_pier_data_2G[chain][idx];
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
cal_pier_data = &ar->eeprom.
cal_pier_data_5G[chain][idx];
break;
@@ -1340,7 +1340,7 @@ static void carl9170_calc_ctl(struct ar9170 *ar, u32 freq, enum carl9170_bw bw)
/* skip CTL and heavy clip for CTL_MKK and CTL_ETSI */
return;
- if (ar->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (ar->hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
modes = mode_list_2ghz;
nr_modes = ARRAY_SIZE(mode_list_2ghz);
} else {
@@ -1607,7 +1607,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
return err;
err = carl9170_init_rf_banks_0_7(ar,
- channel->band == IEEE80211_BAND_5GHZ);
+ channel->band == NL80211_BAND_5GHZ);
if (err)
return err;
@@ -1621,7 +1621,7 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
return err;
err = carl9170_init_rf_bank4_pwr(ar,
- channel->band == IEEE80211_BAND_5GHZ,
+ channel->band == NL80211_BAND_5GHZ,
channel->center_freq, bw);
if (err)
return err;
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index d66533cbc..0c34c8729 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -417,7 +417,7 @@ static int carl9170_rx_mac_status(struct ar9170 *ar,
return -EINVAL;
}
- if (status->band == IEEE80211_BAND_2GHZ)
+ if (status->band == NL80211_BAND_2GHZ)
status->rate_idx += 4;
break;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index ae86a600d..2bf04c9ed 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -720,12 +720,12 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
/* +1 dBm for HT40 */
*tpc += 2;
- if (info->band == IEEE80211_BAND_2GHZ)
+ if (info->band == NL80211_BAND_2GHZ)
txpower = ar->power_2G_ht40;
else
txpower = ar->power_5G_ht40;
} else {
- if (info->band == IEEE80211_BAND_2GHZ)
+ if (info->band == NL80211_BAND_2GHZ)
txpower = ar->power_2G_ht20;
else
txpower = ar->power_5G_ht20;
@@ -734,7 +734,7 @@ static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
*phyrate = txrate->idx;
*tpc += txpower[idx & 7];
} else {
- if (info->band == IEEE80211_BAND_2GHZ) {
+ if (info->band == NL80211_BAND_2GHZ) {
if (idx < 4)
txpower = ar->power_2G_cck;
else
@@ -797,7 +797,7 @@ static __le32 carl9170_tx_physet(struct ar9170 *ar,
* tmp |= cpu_to_le32(AR9170_TX_PHY_GREENFIELD);
*/
} else {
- if (info->band == IEEE80211_BAND_2GHZ) {
+ if (info->band == NL80211_BAND_2GHZ) {
if (txrate->idx <= AR9170_TX_PHY_RATE_CCK_11M)
tmp |= cpu_to_le32(AR9170_TX_PHY_MOD_CCK);
else
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 06ea6cc9e..7e15ed9ed 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -336,12 +336,12 @@ ath_reg_apply_beaconing_flags(struct wiphy *wiphy,
struct ath_regulatory *reg,
enum nl80211_reg_initiator initiator)
{
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
unsigned int i;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!wiphy->bands[band])
continue;
sband = wiphy->bands[band];
@@ -374,7 +374,7 @@ ath_reg_apply_ir_flags(struct wiphy *wiphy,
{
struct ieee80211_supported_band *sband;
- sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
if (!sband)
return;
@@ -402,10 +402,10 @@ static void ath_reg_apply_radar_flags(struct wiphy *wiphy)
struct ieee80211_channel *ch;
unsigned int i;
- if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+ if (!wiphy->bands[NL80211_BAND_5GHZ])
return;
- sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
@@ -772,7 +772,7 @@ ath_regd_init(struct ath_regulatory *reg,
EXPORT_SYMBOL(ath_regd_init);
u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
if (!reg->regpair ||
(reg->country_code == CTRY_DEFAULT &&
@@ -794,9 +794,9 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
}
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
return reg->regpair->reg_2ghz_ctl;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
return reg->regpair->reg_5ghz_ctl;
default:
return NO_CTL;
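
ath_reg_apply_beaconing_flags() above shows the canonical loop: iterate up to NUM_NL80211_BANDS and skip bands the wiphy never registered. The same guard pattern as an illustrative standalone helper:

/* Illustrative helper: count the channels a wiphy actually
 * registered. The NULL check per band is mandatory, since most
 * devices populate only a subset of NUM_NL80211_BANDS.
 */
static int count_registered_channels(struct wiphy *wiphy)
{
	enum nl80211_band band;
	int n = 0;

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		if (!wiphy->bands[band])
			continue;
		n += wiphy->bands[band]->n_channels;
	}

	return n;
}
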
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index 37f53bd8f..565d3075f 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -255,7 +255,7 @@ int ath_regd_init(struct ath_regulatory *reg, struct wiphy *wiphy,
void (*reg_notifier)(struct wiphy *wiphy,
struct regulatory_request *request));
u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
- enum ieee80211_band band);
+ enum nl80211_band band);
void ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg);
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
index ef44a2da6..2a6bb62e7 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.c
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -33,9 +33,7 @@ static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
char buf[3];
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
- vif = container_of((void *)vif_priv,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(vif_priv);
if (NL80211_IFTYPE_STATION == vif->type) {
if (vif_priv->pw_state == WCN36XX_BMPS)
buf[0] = '1';
@@ -70,9 +68,7 @@ static ssize_t write_file_bool_bmps(struct file *file,
case 'Y':
case '1':
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
- vif = container_of((void *)vif_priv,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(vif_priv);
if (NL80211_IFTYPE_STATION == vif->type) {
wcn36xx_enable_keep_alive_null_packet(wcn, vif);
wcn36xx_pmc_enter_bmps_state(wcn, vif);
@@ -83,9 +79,7 @@ static ssize_t write_file_bool_bmps(struct file *file,
case 'N':
case '0':
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
- vif = container_of((void *)vif_priv,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(vif_priv);
if (NL80211_IFTYPE_STATION == vif->type)
wcn36xx_pmc_exit_bmps_state(wcn, vif);
}
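
The open-coded container_of() calls are folded into wcn36xx_priv_to_vif(); the helper is presumably (in wcn36xx.h, not shown in this diff) a thin wrapper along these lines:

/* Presumed shape of the helper; drv_priv is the driver-private
 * area embedded at the end of struct ieee80211_vif.
 */
static inline struct ieee80211_vif *
wcn36xx_priv_to_vif(struct wcn36xx_vif *vif_priv)
{
	return container_of((void *)vif_priv, struct ieee80211_vif,
			    drv_priv);
}
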
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index b947de0fb..658bfb8ba 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -48,12 +48,15 @@
#define WCN36XX_HAL_IPV4_ADDR_LEN 4
-#define WALN_HAL_STA_INVALID_IDX 0xFF
+#define WCN36XX_HAL_STA_INVALID_IDX 0xFF
#define WCN36XX_HAL_BSS_INVALID_IDX 0xFF
/* Default Beacon template size */
#define BEACON_TEMPLATE_SIZE 0x180
+/* Minimum PVM size that the FW expects. See comment in smd.c for details. */
+#define TIM_MIN_PVM_SIZE 6
+
/* Param Change Bitmap sent to HAL */
#define PARAM_BCN_INTERVAL_CHANGED (1 << 0)
#define PARAM_SHORT_PREAMBLE_CHANGED (1 << 1)
@@ -2884,11 +2887,14 @@ struct update_beacon_rsp_msg {
struct wcn36xx_hal_send_beacon_req_msg {
struct wcn36xx_hal_msg_header header;
+ /* length of the template + 6. Only qcom knows why */
+ u32 beacon_length6;
+
/* length of the template. */
u32 beacon_length;
/* Beacon data. */
- u8 beacon[BEACON_TEMPLATE_SIZE];
+ u8 beacon[BEACON_TEMPLATE_SIZE - sizeof(u32)];
u8 bssid[ETH_ALEN];
@@ -4261,9 +4267,9 @@ struct wcn36xx_hal_rcv_flt_mc_addr_list_type {
u8 data_offset;
u32 mc_addr_count;
- u8 mc_addr[ETH_ALEN][WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS];
+ u8 mc_addr[WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS][ETH_ALEN];
u8 bss_index;
-};
+} __packed;
struct wcn36xx_hal_set_pkt_filter_rsp_msg {
struct wcn36xx_hal_msg_header header;
@@ -4317,7 +4323,7 @@ struct wcn36xx_hal_rcv_flt_pkt_clear_rsp_msg {
struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg {
struct wcn36xx_hal_msg_header header;
struct wcn36xx_hal_rcv_flt_mc_addr_list_type mc_addr_list;
-};
+} __packed;
struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_rsp_msg {
struct wcn36xx_hal_msg_header header;
@@ -4383,6 +4389,45 @@ enum place_holder_in_cap_bitmap {
RTT = 20,
RATECTRL = 21,
WOW = 22,
+ WLAN_ROAM_SCAN_OFFLOAD = 23,
+ SPECULATIVE_PS_POLL = 24,
+ SCAN_SCH = 25,
+ IBSS_HEARTBEAT_OFFLOAD = 26,
+ WLAN_SCAN_OFFLOAD = 27,
+ WLAN_PERIODIC_TX_PTRN = 28,
+ ADVANCE_TDLS = 29,
+ BATCH_SCAN = 30,
+ FW_IN_TX_PATH = 31,
+ EXTENDED_NSOFFLOAD_SLOT = 32,
+ CH_SWITCH_V1 = 33,
+ HT40_OBSS_SCAN = 34,
+ UPDATE_CHANNEL_LIST = 35,
+ WLAN_MCADDR_FLT = 36,
+ WLAN_CH144 = 37,
+ NAN = 38,
+ TDLS_SCAN_COEXISTENCE = 39,
+ LINK_LAYER_STATS_MEAS = 40,
+ MU_MIMO = 41,
+ EXTENDED_SCAN = 42,
+ DYNAMIC_WMM_PS = 43,
+ MAC_SPOOFED_SCAN = 44,
+ BMU_ERROR_GENERIC_RECOVERY = 45,
+ DISA = 46,
+ FW_STATS = 47,
+ WPS_PRBRSP_TMPL = 48,
+ BCN_IE_FLT_DELTA = 49,
+ TDLS_OFF_CHANNEL = 51,
+ RTT3 = 52,
+ MGMT_FRAME_LOGGING = 53,
+ ENHANCED_TXBD_COMPLETION = 54,
+ LOGGING_ENHANCEMENT = 55,
+ EXT_SCAN_ENHANCED = 56,
+ MEMORY_DUMP_SUPPORTED = 57,
+ PER_PKT_STATS_SUPPORTED = 58,
+ EXT_LL_STAT = 60,
+ WIFI_CONFIG = 61,
+ ANTENNA_DIVERSITY_SELECTION = 62,
+
MAX_FEATURE_SUPPORTED = 128,
};
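
The mc_addr change deserves a second look: the old u8 mc_addr[ETH_ALEN][MAX] and the new u8 mc_addr[MAX][ETH_ALEN] have the same total size, so nothing crashed, but indexing mc_addr[i] stepped with the wrong stride. A standalone demonstration (array bounds are illustrative, not the driver's real limit):

#include <stdio.h>

#define ETH_ALEN  6
#define MAX_ADDRS 10	/* illustrative, not the driver's real limit */

int main(void)
{
	unsigned char old_layout[ETH_ALEN][MAX_ADDRS];	/* before the fix */
	unsigned char new_layout[MAX_ADDRS][ETH_ALEN];	/* after the fix */

	/* Same footprint, so the bug was invisible to sizeof()... */
	printf("total: %zu vs %zu\n",
	       sizeof(old_layout), sizeof(new_layout));

	/* ...but old_layout[i] strides 10 bytes while new_layout[i]
	 * strides 6, so copying 6-byte MACs into old_layout[i]
	 * scribbled over the neighbouring entries.
	 */
	printf("stride: %zu vs %zu\n",
	       sizeof(old_layout[0]), sizeof(new_layout[0]));
	return 0;
}
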
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index 27f9852e7..8e2c5ff7a 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -26,14 +26,14 @@ module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
#define CHAN2G(_freq, _idx) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 25, \
}
#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 25, \
@@ -201,7 +201,45 @@ static const char * const wcn36xx_caps_names[] = {
"BCN_FILTER", /* 19 */
"RTT", /* 20 */
"RATECTRL", /* 21 */
- "WOW" /* 22 */
+ "WOW", /* 22 */
+ "WLAN_ROAM_SCAN_OFFLOAD", /* 23 */
+ "SPECULATIVE_PS_POLL", /* 24 */
+ "SCAN_SCH", /* 25 */
+ "IBSS_HEARTBEAT_OFFLOAD", /* 26 */
+ "WLAN_SCAN_OFFLOAD", /* 27 */
+ "WLAN_PERIODIC_TX_PTRN", /* 28 */
+ "ADVANCE_TDLS", /* 29 */
+ "BATCH_SCAN", /* 30 */
+ "FW_IN_TX_PATH", /* 31 */
+ "EXTENDED_NSOFFLOAD_SLOT", /* 32 */
+ "CH_SWITCH_V1", /* 33 */
+ "HT40_OBSS_SCAN", /* 34 */
+ "UPDATE_CHANNEL_LIST", /* 35 */
+ "WLAN_MCADDR_FLT", /* 36 */
+ "WLAN_CH144", /* 37 */
+ "NAN", /* 38 */
+ "TDLS_SCAN_COEXISTENCE", /* 39 */
+ "LINK_LAYER_STATS_MEAS", /* 40 */
+ "MU_MIMO", /* 41 */
+ "EXTENDED_SCAN", /* 42 */
+ "DYNAMIC_WMM_PS", /* 43 */
+ "MAC_SPOOFED_SCAN", /* 44 */
+ "BMU_ERROR_GENERIC_RECOVERY", /* 45 */
+ "DISA", /* 46 */
+ "FW_STATS", /* 47 */
+ "WPS_PRBRSP_TMPL", /* 48 */
+ "BCN_IE_FLT_DELTA", /* 49 */
+ "TDLS_OFF_CHANNEL", /* 51 */
+ "RTT3", /* 52 */
+ "MGMT_FRAME_LOGGING", /* 53 */
+ "ENHANCED_TXBD_COMPLETION", /* 54 */
+ "LOGGING_ENHANCEMENT", /* 55 */
+ "EXT_SCAN_ENHANCED", /* 56 */
+ "MEMORY_DUMP_SUPPORTED", /* 57 */
+ "PER_PKT_STATS_SUPPORTED", /* 58 */
+ "EXT_LL_STAT", /* 60 */
+ "WIFI_CONFIG", /* 61 */
+ "ANTENNA_DIVERSITY_SELECTION", /* 62 */
};
static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
@@ -287,6 +325,7 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
}
wcn36xx_detect_chip_version(wcn);
+ wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_ENABLE_MC_ADDR_LIST, 1);
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
@@ -346,9 +385,7 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
ch);
list_for_each_entry(tmp, &wcn->vif_list, list) {
- vif = container_of((void *)tmp,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(tmp);
wcn36xx_smd_switch_channel(wcn, vif, ch);
}
}
@@ -356,15 +393,57 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
return 0;
}
-#define WCN36XX_SUPPORTED_FILTERS (0)
-
static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
unsigned int changed,
unsigned int *total, u64 multicast)
{
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp;
+ struct wcn36xx *wcn = hw->priv;
+ struct wcn36xx_vif *tmp;
+ struct ieee80211_vif *vif = NULL;
+
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
- *total &= WCN36XX_SUPPORTED_FILTERS;
+ *total &= FIF_ALLMULTI;
+
+ fp = (void *)(unsigned long)multicast;
+ list_for_each_entry(tmp, &wcn->vif_list, list) {
+ vif = wcn36xx_priv_to_vif(tmp);
+
+ /* FW handles MC filtering only when connected as STA */
+ if (*total & FIF_ALLMULTI)
+ wcn36xx_smd_set_mc_list(wcn, vif, NULL);
+ else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc)
+ wcn36xx_smd_set_mc_list(wcn, vif, fp);
+ }
+ kfree(fp);
+}
+
+static u64 wcn36xx_prepare_multicast(struct ieee80211_hw *hw,
+ struct netdev_hw_addr_list *mc_list)
+{
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp;
+ struct netdev_hw_addr *ha;
+
+ wcn36xx_dbg(WCN36XX_DBG_MAC, "mac prepare multicast list\n");
+ fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
+ if (!fp) {
+ wcn36xx_err("Out of memory setting filters.\n");
+ return 0;
+ }
+
+ fp->mc_addr_count = 0;
+ /* update multicast filtering parameters */
+ if (netdev_hw_addr_list_count(mc_list) <=
+ WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS) {
+ netdev_hw_addr_list_for_each(ha, mc_list) {
+ memcpy(fp->mc_addr[fp->mc_addr_count],
+ ha->addr, ETH_ALEN);
+ fp->mc_addr_count++;
+ }
+ }
+
+ return (u64)(unsigned long)fp;
}
static void wcn36xx_tx(struct ieee80211_hw *hw,
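
The kzalloc(GFP_ATOMIC)/kfree split above leans on mac80211's prepare_multicast/configure_filter contract: prepare_multicast returns an opaque u64 cookie that mac80211 later passes back as the multicast argument. A compressed sketch of the round trip with hypothetical names:

#include <linux/slab.h>
#include <linux/types.h>

struct example_filter {		/* stand-in for the driver's MC list */
	int n_addrs;
};

/* Called by mac80211 in atomic context; the returned u64 is opaque. */
static u64 example_prepare_multicast(void)
{
	struct example_filter *fp = kzalloc(sizeof(*fp), GFP_ATOMIC);

	return (u64)(unsigned long)fp;	/* pointer smuggled as cookie */
}

/* Called later in process context with the same cookie. */
static void example_configure_filter(u64 multicast)
{
	struct example_filter *fp = (void *)(unsigned long)multicast;

	/* ... program the hardware from fp ... */
	kfree(fp);
}
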
@@ -375,7 +454,7 @@ static void wcn36xx_tx(struct ieee80211_hw *hw,
struct wcn36xx_sta *sta_priv = NULL;
if (control->sta)
- sta_priv = (struct wcn36xx_sta *)control->sta->drv_priv;
+ sta_priv = wcn36xx_sta_to_priv(control->sta);
if (wcn36xx_start_tx(wcn, sta_priv, skb))
ieee80211_free_txskb(wcn->hw, skb);
@@ -387,8 +466,8 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key_conf)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
- struct wcn36xx_sta *sta_priv = vif_priv->sta;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
int ret = 0;
u8 key[WLAN_MAX_KEY_LEN];
@@ -473,6 +552,7 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case DISABLE_KEY:
if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
wcn36xx_smd_remove_bsskey(wcn,
vif_priv->encrypt_type,
key_conf->keyidx);
@@ -516,11 +596,11 @@ static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
}
static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
int i, size;
u16 *rates_table;
- struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
u32 rates = sta->supp_rates[band];
memset(&sta_priv->supported_rates, 0,
@@ -529,7 +609,7 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
rates_table = sta_priv->supported_rates.dsss_rates;
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
for (i = 0; i < size; i++) {
if (rates & 0x01) {
rates_table[i] = wcn_2ghz_rates[i].hw_value;
@@ -590,7 +670,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
struct sk_buff *skb = NULL;
u16 tim_off, tim_len;
enum wcn36xx_hal_link_state link_state;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%08x\n",
vif, changed);
@@ -620,7 +700,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
if (!is_zero_ether_addr(bss_conf->bssid)) {
vif_priv->is_joining = true;
- vif_priv->bss_index = 0xff;
+ vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
wcn36xx_smd_join(wcn, bss_conf->bssid,
vif->addr, WCN36XX_HW_CHANNEL(wcn));
wcn36xx_smd_config_bss(wcn, vif, NULL,
@@ -628,6 +708,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
} else {
vif_priv->is_joining = false;
wcn36xx_smd_delete_bss(wcn, vif);
+ vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
}
}
@@ -655,6 +736,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
vif->addr,
bss_conf->aid);
+ vif_priv->sta_assoc = true;
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!sta) {
@@ -663,7 +745,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
rcu_read_unlock();
goto out;
}
- sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ sta_priv = wcn36xx_sta_to_priv(sta);
wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
@@ -686,6 +768,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->bssid,
vif->addr,
bss_conf->aid);
+ vif_priv->sta_assoc = false;
wcn36xx_smd_set_link_st(wcn,
bss_conf->bssid,
vif->addr,
@@ -713,7 +796,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
if (bss_conf->enable_beacon) {
vif_priv->dtim_period = bss_conf->dtim_period;
- vif_priv->bss_index = 0xff;
+ vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
wcn36xx_smd_config_bss(wcn, vif, NULL,
vif->addr, false);
skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
@@ -734,9 +817,9 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
link_state);
} else {
+ wcn36xx_smd_delete_bss(wcn, vif);
wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
WCN36XX_HAL_LINK_IDLE_STATE);
- wcn36xx_smd_delete_bss(wcn, vif);
}
}
out:
@@ -757,7 +840,7 @@ static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
list_del(&vif_priv->list);
@@ -768,7 +851,7 @@ static int wcn36xx_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
vif, vif->type);
@@ -792,13 +875,12 @@ static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
- struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
vif, sta->addr);
spin_lock_init(&sta_priv->ampdu_lock);
- vif_priv->sta = sta_priv;
sta_priv->vif = vif_priv;
/*
* For STA mode HW will be configured on BSS_CHANGED_ASSOC because
@@ -817,14 +899,12 @@ static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
- struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
vif, sta->addr, sta_priv->sta_index);
wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
- vif_priv->sta = NULL;
sta_priv->vif = NULL;
return 0;
}
@@ -860,7 +940,7 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_ampdu_params *params)
{
struct wcn36xx *wcn = hw->priv;
- struct wcn36xx_sta *sta_priv = NULL;
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(params->sta);
struct ieee80211_sta *sta = params->sta;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
@@ -869,8 +949,6 @@ static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
- sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
-
switch (action) {
case IEEE80211_AMPDU_RX_START:
sta_priv->tid = tid;
@@ -923,6 +1001,7 @@ static const struct ieee80211_ops wcn36xx_ops = {
.resume = wcn36xx_resume,
#endif
.config = wcn36xx_config,
+ .prepare_multicast = wcn36xx_prepare_multicast,
.configure_filter = wcn36xx_configure_filter,
.tx = wcn36xx_tx,
.set_key = wcn36xx_set_key,
@@ -958,8 +1037,8 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
- wcn->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wcn_band_2ghz;
- wcn->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wcn_band_5ghz;
+ wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz;
+ wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
wcn->hw->wiphy->cipher_suites = cipher_suites;
wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 28b515c81..589fe5f70 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -22,7 +22,7 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
int ret = 0;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
/* TODO: Make sure the TX chain clean */
ret = wcn36xx_smd_enter_bmps(wcn, vif);
if (!ret) {
@@ -42,7 +42,7 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (WCN36XX_BMPS != vif_priv->pw_state) {
wcn36xx_err("Not in BMPS mode, no need to exit from BMPS mode!\n");
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 936938c74..06c5ddcd3 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -104,11 +104,11 @@ static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params *bss_params)
{
- if (IEEE80211_BAND_5GHZ == WCN36XX_BAND(wcn))
+ if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn))
bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
else if (sta && sta->ht_cap.ht_supported)
bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
- else if (sta && (sta->supp_rates[IEEE80211_BAND_2GHZ] & 0x7f))
+ else if (sta && (sta->supp_rates[NL80211_BAND_2GHZ] & 0x7f))
bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
else
bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
@@ -191,16 +191,16 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params *sta_params)
{
- struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
- struct wcn36xx_sta *priv_sta = NULL;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_sta *sta_priv = NULL;
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) {
sta_params->type = 1;
- sta_params->sta_index = 0xFF;
+ sta_params->sta_index = WCN36XX_HAL_STA_INVALID_IDX;
} else {
sta_params->type = 0;
- sta_params->sta_index = 1;
+ sta_params->sta_index = vif_priv->self_sta_index;
}
sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
@@ -215,7 +215,7 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
else
memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
- sta_params->encrypt_type = priv_vif->encrypt_type;
+ sta_params->encrypt_type = vif_priv->encrypt_type;
sta_params->short_preamble_supported = true;
sta_params->rifs_mode = 0;
@@ -224,21 +224,21 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
sta_params->uapsd = 0;
sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
sta_params->max_ampdu_duration = 0;
- sta_params->bssid_index = priv_vif->bss_index;
+ sta_params->bssid_index = vif_priv->bss_index;
sta_params->p2p = 0;
if (sta) {
- priv_sta = (struct wcn36xx_sta *)sta->drv_priv;
+ sta_priv = wcn36xx_sta_to_priv(sta);
if (NL80211_IFTYPE_STATION == vif->type)
memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
else
memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
sta_params->wmm_enabled = sta->wme;
sta_params->max_sp_len = sta->max_sp;
- sta_params->aid = priv_sta->aid;
+ sta_params->aid = sta_priv->aid;
wcn36xx_smd_set_sta_ht_params(sta, sta_params);
- memcpy(&sta_params->supported_rates, &priv_sta->supported_rates,
- sizeof(priv_sta->supported_rates));
+ memcpy(&sta_params->supported_rates, &sta_priv->supported_rates,
+ sizeof(sta_priv->supported_rates));
} else {
wcn36xx_set_default_rates(&sta_params->supported_rates);
wcn36xx_smd_set_sta_default_ht_params(sta_params);
@@ -271,6 +271,16 @@ out:
return ret;
}
+static void init_hal_msg(struct wcn36xx_hal_msg_header *hdr,
+ enum wcn36xx_hal_host_msg_type msg_type,
+ size_t msg_size)
+{
+ memset(hdr, 0, msg_size + sizeof(*hdr));
+ hdr->msg_type = msg_type;
+ hdr->msg_version = WCN36XX_HAL_MSG_VERSION0;
+ hdr->len = msg_size + sizeof(*hdr);
+}
+
#define INIT_HAL_MSG(msg_body, type) \
do { \
memset(&msg_body, 0, sizeof(msg_body)); \
@@ -302,22 +312,6 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
return 0;
}
-static int wcn36xx_smd_rsp_status_check_v2(struct wcn36xx *wcn, void *buf,
- size_t len)
-{
- struct wcn36xx_fw_msg_status_rsp_v2 *rsp;
-
- if (len < sizeof(struct wcn36xx_hal_msg_header) + sizeof(*rsp))
- return wcn36xx_smd_rsp_status_check(buf, len);
-
- rsp = buf + sizeof(struct wcn36xx_hal_msg_header);
-
- if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
- return rsp->status;
-
- return 0;
-}
-
int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
{
struct nv_data *nv_d;
@@ -726,7 +720,7 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
size_t len)
{
struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
- struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (len < sizeof(*rsp))
return -EINVAL;
@@ -743,8 +737,8 @@ static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
"hal add sta self status %d self_sta_index %d dpu_index %d\n",
rsp->status, rsp->self_sta_index, rsp->dpu_index);
- priv_vif->self_sta_index = rsp->self_sta_index;
- priv_vif->self_dpu_desc_index = rsp->dpu_index;
+ vif_priv->self_sta_index = rsp->self_sta_index;
+ vif_priv->self_dpu_desc_index = rsp->dpu_index;
return 0;
}
@@ -949,17 +943,32 @@ static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
memcpy(&v1->mac, orig->mac, ETH_ALEN);
v1->aid = orig->aid;
v1->type = orig->type;
+ v1->short_preamble_supported = orig->short_preamble_supported;
v1->listen_interval = orig->listen_interval;
+ v1->wmm_enabled = orig->wmm_enabled;
v1->ht_capable = orig->ht_capable;
-
+ v1->tx_channel_width_set = orig->tx_channel_width_set;
+ v1->rifs_mode = orig->rifs_mode;
+ v1->lsig_txop_protection = orig->lsig_txop_protection;
v1->max_ampdu_size = orig->max_ampdu_size;
v1->max_ampdu_density = orig->max_ampdu_density;
v1->sgi_40mhz = orig->sgi_40mhz;
v1->sgi_20Mhz = orig->sgi_20Mhz;
-
+ v1->rmf = orig->rmf;
+ v1->encrypt_type = orig->encrypt_type;
+ v1->action = orig->action;
+ v1->uapsd = orig->uapsd;
+ v1->max_sp_len = orig->max_sp_len;
+ v1->green_field_capable = orig->green_field_capable;
+ v1->mimo_ps = orig->mimo_ps;
+ v1->delayed_ba_support = orig->delayed_ba_support;
+ v1->max_ampdu_duration = orig->max_ampdu_duration;
+ v1->dsss_cck_mode_40mhz = orig->dsss_cck_mode_40mhz;
memcpy(&v1->supported_rates, &orig->supported_rates,
sizeof(orig->supported_rates));
v1->sta_index = orig->sta_index;
+ v1->bssid_index = orig->bssid_index;
+ v1->p2p = orig->p2p;
}
static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
@@ -969,7 +978,7 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
{
struct wcn36xx_hal_config_sta_rsp_msg *rsp;
struct config_sta_rsp_params *params;
- struct wcn36xx_sta *sta_priv = (struct wcn36xx_sta *)sta->drv_priv;
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
if (len < sizeof(*rsp))
return -EINVAL;
@@ -1170,12 +1179,13 @@ static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
void *buf,
size_t len)
{
struct wcn36xx_hal_config_bss_rsp_msg *rsp;
struct wcn36xx_hal_config_bss_rsp_params *params;
- struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (len < sizeof(*rsp))
return -EINVAL;
@@ -1198,14 +1208,15 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
params->bss_bcast_sta_idx, params->mac,
params->tx_mgmt_power, params->ucast_dpu_signature);
- priv_vif->bss_index = params->bss_index;
+ vif_priv->bss_index = params->bss_index;
- if (priv_vif->sta) {
- priv_vif->sta->bss_sta_index = params->bss_sta_index;
- priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
+ if (sta) {
+ struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
+ sta_priv->bss_sta_index = params->bss_sta_index;
+ sta_priv->bss_dpu_desc_index = params->dpu_desc_index;
}
- priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
+ vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature;
return 0;
}
@@ -1217,7 +1228,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct wcn36xx_hal_config_bss_req_msg msg;
struct wcn36xx_hal_config_bss_params *bss;
struct wcn36xx_hal_config_sta_params *sta_params;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
@@ -1329,6 +1340,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
}
ret = wcn36xx_smd_config_bss_rsp(wcn,
vif,
+ sta,
wcn->hal_buf,
wcn->hal_rsp_len);
if (ret) {
@@ -1343,13 +1355,13 @@ out:
int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_delete_bss_req_msg msg_body;
- struct wcn36xx_vif *priv_vif = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
- msg_body.bss_index = priv_vif->bss_index;
+ msg_body.bss_index = vif_priv->bss_index;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -1375,26 +1387,47 @@ int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
u16 p2p_off)
{
struct wcn36xx_hal_send_beacon_req_msg msg_body;
- int ret = 0;
+ int ret = 0, pad, pvm_len;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
- /* TODO need to find out why this is needed? */
- msg_body.beacon_length = skb_beacon->len + 6;
+ pvm_len = skb_beacon->data[tim_off + 1] - 3;
+ pad = TIM_MIN_PVM_SIZE - pvm_len;
- if (BEACON_TEMPLATE_SIZE > msg_body.beacon_length) {
- memcpy(&msg_body.beacon, &skb_beacon->len, sizeof(u32));
- memcpy(&(msg_body.beacon[4]), skb_beacon->data,
- skb_beacon->len);
- } else {
+ /* Padding is not needed in mesh mode since tim_off is always 0. */
+ if (vif->type == NL80211_IFTYPE_MESH_POINT)
+ pad = 0;
+
+ msg_body.beacon_length = skb_beacon->len + pad;
+ /* TODO: find out why + 6 is needed */
+ msg_body.beacon_length6 = msg_body.beacon_length + 6;
+
+ if (msg_body.beacon_length > BEACON_TEMPLATE_SIZE) {
wcn36xx_err("Beacon is to big: beacon size=%d\n",
msg_body.beacon_length);
ret = -ENOMEM;
goto out;
}
+ memcpy(msg_body.beacon, skb_beacon->data, skb_beacon->len);
memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
+ if (pad > 0) {
+ /*
+ * The wcn36xx FW has a fixed size for the PVM in the TIM. If the
+ * beacon template from mac80211 has a PVM shorter than the FW
+ * expects, the FW will overwrite the data after the TIM.
+ */
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "Pad TIM PVM. %d bytes at %d\n",
+ pad, pvm_len);
+ memmove(&msg_body.beacon[tim_off + 5 + pvm_len + pad],
+ &msg_body.beacon[tim_off + 5 + pvm_len],
+ skb_beacon->len - (tim_off + 5 + pvm_len));
+ memset(&msg_body.beacon[tim_off + 5 + pvm_len], 0, pad);
+ msg_body.beacon[tim_off + 1] += pad;
+ }
+
/* TODO: find out why this is needed */
if (vif->type == NL80211_IFTYPE_MESH_POINT)
/* mesh beacons don't need this, so push further down */
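A minimal sketch, not part of the patch, of the PVM padding arithmetic used in the hunk above; it assumes TIM_MIN_PVM_SIZE is the firmware's fixed partial-virtual-bitmap length and that the buffer has room for the extra pad bytes:

/*
 * Sketch only. TIM element layout inside the beacon:
 *   beacon[tim_off + 0]  element ID
 *   beacon[tim_off + 1]  element length
 *   beacon[tim_off + 2]  DTIM count
 *   beacon[tim_off + 3]  DTIM period
 *   beacon[tim_off + 4]  bitmap control
 *   beacon[tim_off + 5]  first PVM octet
 */
static size_t pad_tim_pvm(u8 *beacon, size_t len, int tim_off)
{
	int pvm_len = beacon[tim_off + 1] - 3;	/* strip the 3 fixed octets */
	int pad = TIM_MIN_PVM_SIZE - pvm_len;

	if (pad <= 0)
		return len;

	/* shift the tail to make room, zero-fill, grow the element */
	memmove(&beacon[tim_off + 5 + pvm_len + pad],
		&beacon[tim_off + 5 + pvm_len],
		len - (tim_off + 5 + pvm_len));
	memset(&beacon[tim_off + 5 + pvm_len], 0, pad);
	beacon[tim_off + 1] += pad;

	return len + pad;
}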
@@ -1598,8 +1631,7 @@ int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
wcn36xx_err("Sending hal_remove_bsskey failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
- wcn->hal_rsp_len);
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
goto out;
@@ -1612,7 +1644,7 @@ out:
int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_enter_bmps_req_msg msg_body;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
@@ -1641,8 +1673,8 @@ out:
int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
- struct wcn36xx_hal_enter_bmps_req_msg msg_body;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_hal_exit_bmps_req_msg msg_body;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
@@ -1703,7 +1735,7 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
int packet_type)
{
struct wcn36xx_hal_keep_alive_req_msg msg_body;
- struct wcn36xx_vif *vif_priv = (struct wcn36xx_vif *)vif->drv_priv;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
@@ -1944,6 +1976,17 @@ out:
return ret;
}
+static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len)
+{
+ struct wcn36xx_hal_trigger_ba_rsp_msg *rsp;
+
+ if (len < sizeof(*rsp))
+ return -EINVAL;
+
+ rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf;
+ return rsp->status;
+}
+
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
@@ -1968,8 +2011,7 @@ int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index)
wcn36xx_err("Sending hal_trigger_ba failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check_v2(wcn, wcn->hal_buf,
- wcn->hal_rsp_len);
+ ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
goto out;
@@ -2006,9 +2048,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
list_for_each_entry(tmp, &wcn->vif_list, list) {
wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
tmp->bss_index);
- vif = container_of((void *)tmp,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(tmp);
ieee80211_connection_loss(vif);
}
return 0;
@@ -2023,9 +2063,7 @@ static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
if (tmp->bss_index == rsp->bss_index) {
wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
rsp->bss_index);
- vif = container_of((void *)tmp,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(tmp);
ieee80211_connection_loss(vif);
return 0;
}
@@ -2041,25 +2079,24 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
{
struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
struct wcn36xx_vif *tmp;
- struct ieee80211_sta *sta = NULL;
+ struct ieee80211_sta *sta;
if (len != sizeof(*rsp)) {
wcn36xx_warn("Corrupted delete sta indication\n");
return -EIO;
}
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "delete station indication %pM index %d\n",
+ rsp->addr2, rsp->sta_id);
+
list_for_each_entry(tmp, &wcn->vif_list, list) {
- if (sta && (tmp->sta->sta_index == rsp->sta_id)) {
- sta = container_of((void *)tmp->sta,
- struct ieee80211_sta,
- drv_priv);
- wcn36xx_dbg(WCN36XX_DBG_HAL,
- "delete station indication %pM index %d\n",
- rsp->addr2,
- rsp->sta_id);
+ rcu_read_lock();
+ sta = ieee80211_find_sta(wcn36xx_priv_to_vif(tmp), rsp->addr2);
+ if (sta)
ieee80211_report_low_ack(sta, 0);
+ rcu_read_unlock();
+ if (sta)
return 0;
- }
}
wcn36xx_warn("STA with addr %pM and index %d not found\n",
@@ -2100,6 +2137,46 @@ out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
+
+int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp)
+{
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
+ int ret = 0;
+
+ mutex_lock(&wcn->hal_mutex);
+
+ msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *)
+ wcn->hal_buf;
+ init_hal_msg(&msg_body->header, WCN36XX_HAL_8023_MULTICAST_LIST_REQ,
+ sizeof(msg_body->mc_addr_list));
+
+ /* An empty list means all mc traffic will be received */
+ if (fp)
+ memcpy(&msg_body->mc_addr_list, fp,
+ sizeof(msg_body->mc_addr_list));
+ else
+ msg_body->mc_addr_list.mc_addr_count = 0;
+
+ msg_body->mc_addr_list.bss_index = vif_priv->bss_index;
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending HAL_8023_MULTICAST_LIST failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("HAL_8023_MULTICAST_LIST rsp failed err=%d\n", ret);
+ goto out;
+ }
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
{
struct wcn36xx_hal_msg_header *msg_header = buf;
@@ -2141,6 +2218,7 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
case WCN36XX_HAL_CH_SWITCH_RSP:
case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
+ case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index 8361f9e39..d93e3fd73 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -24,7 +24,7 @@
#define WCN36XX_HAL_BUF_SIZE 4096
-#define HAL_MSG_TIMEOUT 500
+#define HAL_MSG_TIMEOUT 10000
#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
/* The PNO version info is contained in the rsp msg */
@@ -44,15 +44,6 @@ struct wcn36xx_fw_msg_status_rsp {
u32 status;
} __packed;
-/* wcn3620 returns this for tigger_ba */
-
-struct wcn36xx_fw_msg_status_rsp_v2 {
- u8 bss_id[6];
- u32 status __packed;
- u16 count_following_candidates __packed;
- /* candidate list follows */
-};
-
struct wcn36xx_hal_ind_msg {
struct list_head list;
u8 *msg;
@@ -136,4 +127,7 @@ int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 sta_index);
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index);
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value);
+int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
+ struct ieee80211_vif *vif,
+ struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp);
#endif /* _SMD_H_ */
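A hedged sketch of how the raised HAL_MSG_TIMEOUT is presumably consumed; the actual send-and-wait helper is not shown in this patch, and the helper name below is hypothetical, but the hal_rsp_compl completion it would wait on is signalled in wcn36xx_smd_rsp_process() above:

/* Sketch only, assuming the helper waits like this. A return of 0 from
 * wait_for_completion_timeout() means the 10 s timeout expired.
 */
static int smd_wait_for_rsp(struct wcn36xx *wcn)
{
	if (!wait_for_completion_timeout(&wcn->hal_rsp_compl,
					 msecs_to_jiffies(HAL_MSG_TIMEOUT)))
		return -ETIME;

	return 0;
}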
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 9bec82372..1f34c2e91 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,7 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
@@ -102,9 +102,7 @@ static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
struct wcn36xx_vif *vif_priv = NULL;
struct ieee80211_vif *vif = NULL;
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
- vif = container_of((void *)vif_priv,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(vif_priv);
if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
return vif_priv;
}
@@ -167,9 +165,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
*/
if (sta_priv) {
__vif_priv = sta_priv->vif;
- vif = container_of((void *)__vif_priv,
- struct ieee80211_vif,
- drv_priv);
+ vif = wcn36xx_priv_to_vif(__vif_priv);
bd->dpu_sign = sta_priv->ucast_dpu_sign;
if (vif->type == NL80211_IFTYPE_STATION) {
@@ -225,7 +221,7 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
/* default rate for unicast */
if (ieee80211_is_mgmt(hdr->frame_control))
- bd->bd_rate = (WCN36XX_BAND(wcn) == IEEE80211_BAND_5GHZ) ?
+ bd->bd_rate = (WCN36XX_BAND(wcn) == NL80211_BAND_5GHZ) ?
WCN36XX_BD_RATE_CTRL :
WCN36XX_BD_RATE_MGMT;
else if (ieee80211_is_ctl(hdr->frame_control))
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 0555c9502..051fb3338 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -125,10 +125,10 @@ struct wcn36xx_platform_ctrl_ops {
*/
struct wcn36xx_vif {
struct list_head list;
- struct wcn36xx_sta *sta;
u8 dtim_period;
enum ani_ed_type encrypt_type;
bool is_joining;
+ bool sta_assoc;
struct wcn36xx_hal_mac_ssid ssid;
/* Power management */
@@ -263,4 +263,22 @@ struct ieee80211_sta *wcn36xx_priv_to_sta(struct wcn36xx_sta *sta_priv)
return container_of((void *)sta_priv, struct ieee80211_sta, drv_priv);
}
+static inline
+struct wcn36xx_vif *wcn36xx_vif_to_priv(struct ieee80211_vif *vif)
+{
+ return (struct wcn36xx_vif *) vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wcn36xx_priv_to_vif(struct wcn36xx_vif *vif_priv)
+{
+ return container_of((void *) vif_priv, struct ieee80211_vif, drv_priv);
+}
+
+static inline
+struct wcn36xx_sta *wcn36xx_sta_to_priv(struct ieee80211_sta *sta)
+{
+ return (struct wcn36xx_sta *)sta->drv_priv;
+}
+
#endif /* _WCN36XX_H_ */
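The conversions above work because mac80211 embeds the driver's private area directly in its objects (drv_priv is a trailing, suitably aligned region), so the cast and container_of() are exact inverses; a small round-trip sketch, with a hypothetical caller name:

/* Sketch only: drv_priv and the private struct share storage. */
static void roundtrip_check(struct ieee80211_vif *vif)
{
	struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);

	WARN_ON(wcn36xx_priv_to_vif(vif_priv) != vif);
}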
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index fdf63d5fe..11b544b26 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -18,6 +18,7 @@ wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
wil6210-y += wil_platform.o
wil6210-y += ethtool.o
wil6210-y += wil_crash_dump.o
+wil6210-y += p2p.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 11f1bb8df..576981129 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -18,8 +18,10 @@
#include "wil6210.h"
#include "wmi.h"
+#define WIL_MAX_ROC_DURATION_MS 5000
+
#define CHAN60G(_channel, _flags) { \
- .band = IEEE80211_BAND_60GHZ, \
+ .band = NL80211_BAND_60GHZ, \
.center_freq = 56160 + (2160 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -76,12 +78,24 @@ wil_mgmt_stypes[NUM_NL80211_IFTYPES] = {
.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
},
+ [NL80211_IFTYPE_P2P_DEVICE] = {
+ .tx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_RESP >> 4),
+ .rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
+ BIT(IEEE80211_STYPE_PROBE_REQ >> 4)
+ },
};
static const u32 wil_cipher_suites[] = {
WLAN_CIPHER_SUITE_GCMP,
};
+static const char * const key_usage_str[] = {
+ [WMI_KEY_USE_PAIRWISE] = "PTK",
+ [WMI_KEY_USE_RX_GROUP] = "RX_GTK",
+ [WMI_KEY_USE_TX_GROUP] = "TX_GTK",
+};
+
int wil_iftype_nl2wmi(enum nl80211_iftype type)
{
static const struct {
@@ -113,7 +127,7 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
.interval_usec = 0,
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_notify_req_done_event evt;
} __packed reply;
struct wil_net_stats *stats = &wil->sta[cid].stats;
@@ -226,13 +240,82 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
return rc;
}
+static struct wireless_dev *
+wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
+ unsigned char name_assign_type,
+ enum nl80211_iftype type,
+ u32 *flags, struct vif_params *params)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+ struct net_device *ndev = wil_to_ndev(wil);
+ struct wireless_dev *p2p_wdev;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ if (type != NL80211_IFTYPE_P2P_DEVICE) {
+ wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (wil->p2p_wdev) {
+ wil_err(wil, "%s: P2P_DEVICE interface already created\n",
+ __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ p2p_wdev = kzalloc(sizeof(*p2p_wdev), GFP_KERNEL);
+ if (!p2p_wdev)
+ return ERR_PTR(-ENOMEM);
+
+ p2p_wdev->iftype = type;
+ p2p_wdev->wiphy = wiphy;
+ /* use our primary ethernet address */
+ ether_addr_copy(p2p_wdev->address, ndev->perm_addr);
+
+ wil->p2p_wdev = p2p_wdev;
+
+ return p2p_wdev;
+}
+
+static int wil_cfg80211_del_iface(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ if (wdev != wil->p2p_wdev) {
+ wil_err(wil, "%s: delete of incorrect interface 0x%p\n",
+ __func__, wdev);
+ return -EINVAL;
+ }
+
+ wil_p2p_wdev_free(wil);
+
+ return 0;
+}
+
static int wil_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = wil->wdev;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
+ int rc;
+
+ wil_dbg_misc(wil, "%s() type=%d\n", __func__, type);
+
+ if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) {
+ wil_dbg_misc(wil, "interface is up. resetting...\n");
+ mutex_lock(&wil->mutex);
+ __wil_down(wil);
+ rc = __wil_up(wil);
+ mutex_unlock(&wil->mutex);
+
+ if (rc)
+ return rc;
+ }
switch (type) {
case NL80211_IFTYPE_STATION:
@@ -260,7 +343,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
struct cfg80211_scan_request *request)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = wil->wdev;
+ struct wireless_dev *wdev = request->wdev;
struct {
struct wmi_start_scan_cmd cmd;
u16 chnl[4];
@@ -268,6 +351,9 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
uint i, n;
int rc;
+ wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
+ __func__, wdev, wdev->iftype);
+
if (wil->scan_request) {
wil_err(wil, "Already scanning\n");
return -EAGAIN;
@@ -277,6 +363,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_DEVICE:
break;
default:
return -EOPNOTSUPP;
@@ -288,6 +375,21 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
return -EBUSY;
}
+ /* social scan on P2P_DEVICE is handled as p2p search */
+ if (wdev->iftype == NL80211_IFTYPE_P2P_DEVICE &&
+ wil_p2p_is_social_scan(request)) {
+ wil->scan_request = request;
+ wil->radio_wdev = wdev;
+ rc = wil_p2p_search(wil, request);
+ if (rc) {
+ wil->radio_wdev = wil_to_wdev(wil);
+ wil->scan_request = NULL;
+ }
+ return rc;
+ }
+
+ (void)wil_p2p_stop_discovery(wil);
+
wil_dbg_misc(wil, "Start scan_request 0x%p\n", request);
wil_dbg_misc(wil, "SSID count: %d", request->n_ssids);
@@ -313,6 +415,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
mod_timer(&wil->scan_timer, jiffies + WIL6210_SCAN_TO);
memset(&cmd, 0, sizeof(cmd));
+ cmd.cmd.scan_type = WMI_ACTIVE_SCAN;
cmd.cmd.num_channels = 0;
n = min(request->n_channels, 4U);
for (i = 0; i < n; i++) {
@@ -340,12 +443,19 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
if (rc)
goto out;
+ if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
+ cmd.cmd.discovery_mode = 1;
+ wil_dbg_misc(wil, "active scan with discovery_mode=1\n");
+ }
+
+ wil->radio_wdev = wdev;
rc = wmi_send(wil, WMI_START_SCAN_CMDID, &cmd, sizeof(cmd.cmd) +
cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
out:
if (rc) {
del_timer_sync(&wil->scan_timer);
+ wil->radio_wdev = wil_to_wdev(wil);
wil->scan_request = NULL;
}
@@ -390,6 +500,7 @@ static void wil_print_connect_params(struct wil6210_priv *wil,
print_hex_dump(KERN_INFO, " SSID: ", DUMP_PREFIX_OFFSET,
16, 1, sme->ssid, sme->ssid_len, true);
wil_info(wil, " Privacy: %s\n", sme->privacy ? "secure" : "open");
+ wil_info(wil, " PBSS: %d\n", sme->pbss);
wil_print_crypto(wil, &sme->crypto);
}
@@ -404,7 +515,9 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
const u8 *rsn_eid;
int ch;
int rc = 0;
+ enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
+ wil_dbg_misc(wil, "%s()\n", __func__);
wil_print_connect_params(wil, sme);
if (test_bit(wil_status_fwconnecting, wil->status) ||
@@ -422,14 +535,12 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
if (sme->privacy && !rsn_eid)
wil_info(wil, "WSC connection\n");
- if (sme->pbss) {
- wil_err(wil, "connect - PBSS not yet supported\n");
- return -EOPNOTSUPP;
- }
+ if (sme->pbss)
+ bss_type = IEEE80211_BSS_TYPE_PBSS;
bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid,
sme->ssid, sme->ssid_len,
- IEEE80211_BSS_TYPE_ESS, IEEE80211_PRIVACY_ANY);
+ bss_type, IEEE80211_PRIVACY_ANY);
if (!bss) {
wil_err(wil, "Unable to find BSS\n");
return -ENOENT;
@@ -568,10 +679,20 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_mgmt *mgmt_frame = (void *)buf;
struct wmi_sw_tx_req_cmd *cmd;
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_sw_tx_complete_event evt;
} __packed evt;
+ /* Note: we currently do not support the "wait" parameter; user-space
+ * must call remain_on_channel before mgmt_tx, or listen on a channel
+ * some other way (AP/PCP or connected station).
+ * In addition, we need to check whether the specified "chan" argument
+ * differs from the currently listened channel and fail if it does.
+ */
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+ print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
+
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
@@ -598,7 +719,7 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
struct cfg80211_chan_def *chandef)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- struct wireless_dev *wdev = wil->wdev;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
wdev->preset_chandef = *chandef;
@@ -608,22 +729,19 @@ static int wil_cfg80211_set_channel(struct wiphy *wiphy,
static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
bool pairwise)
{
- struct wireless_dev *wdev = wil->wdev;
+ struct wireless_dev *wdev = wil_to_wdev(wil);
enum wmi_key_usage rc;
- static const char * const key_usage_str[] = {
- [WMI_KEY_USE_PAIRWISE] = "WMI_KEY_USE_PAIRWISE",
- [WMI_KEY_USE_RX_GROUP] = "WMI_KEY_USE_RX_GROUP",
- [WMI_KEY_USE_TX_GROUP] = "WMI_KEY_USE_TX_GROUP",
- };
if (pairwise) {
rc = WMI_KEY_USE_PAIRWISE;
} else {
switch (wdev->iftype) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
rc = WMI_KEY_USE_RX_GROUP;
break;
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
rc = WMI_KEY_USE_TX_GROUP;
break;
default:
@@ -638,20 +756,86 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
return rc;
}
+static struct wil_tid_crypto_rx_single *
+wil_find_crypto_ctx(struct wil6210_priv *wil, u8 key_index,
+ enum wmi_key_usage key_usage, const u8 *mac_addr)
+{
+ int cid = -EINVAL;
+ int tid = 0;
+ struct wil_sta_info *s;
+ struct wil_tid_crypto_rx *c;
+
+ if (key_usage == WMI_KEY_USE_TX_GROUP)
+ return NULL; /* not needed */
+
+ /* supplicant provides Rx group key in STA mode with NULL MAC address */
+ if (mac_addr)
+ cid = wil_find_cid(wil, mac_addr);
+ else if (key_usage == WMI_KEY_USE_RX_GROUP)
+ cid = wil_find_cid_by_idx(wil, 0);
+ if (cid < 0) {
+ wil_err(wil, "No CID for %pM %s[%d]\n", mac_addr,
+ key_usage_str[key_usage], key_index);
+ return ERR_PTR(cid);
+ }
+
+ s = &wil->sta[cid];
+ if (key_usage == WMI_KEY_USE_PAIRWISE)
+ c = &s->tid_crypto_rx[tid];
+ else
+ c = &s->group_crypto_rx;
+
+ return &c->key_id[key_index];
+}
+
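wil_find_crypto_ctx() intentionally has a three-way result, which the callers below separate with IS_ERR() and IS_ERR_OR_NULL(); a sketch of the convention, with a hypothetical caller:

/* Sketch only:
 *   ERR_PTR(-err) - lookup failed (no CID for this MAC address)
 *   NULL          - no Rx context needed (TX group key)
 *   valid pointer - the per-key Rx crypto context
 */
static int invalidate_key_ctx(struct wil6210_priv *wil, u8 key_index,
			      enum wmi_key_usage usage, const u8 *mac)
{
	struct wil_tid_crypto_rx_single *cc =
		wil_find_crypto_ctx(wil, key_index, usage, mac);

	if (IS_ERR(cc))
		return PTR_ERR(cc);
	if (cc)
		cc->key_set = false;

	return 0;
}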
static int wil_cfg80211_add_key(struct wiphy *wiphy,
struct net_device *ndev,
u8 key_index, bool pairwise,
const u8 *mac_addr,
struct key_params *params)
{
+ int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
+ struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
+ key_index,
+ key_usage,
+ mac_addr);
+
+ wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
+ mac_addr, key_usage_str[key_usage], key_index,
+ params->seq_len, params->seq);
+
+ if (IS_ERR(cc)) {
+ wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
+ __func__, mac_addr, key_usage_str[key_usage], key_index,
+ params->seq_len, params->seq);
+ return -EINVAL;
+ }
- wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
- pairwise ? "PTK" : "GTK");
+ if (cc)
+ cc->key_set = false;
+
+ if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
+ wil_err(wil,
+ "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n",
+ params->seq_len, __func__, mac_addr,
+ key_usage_str[key_usage], key_index,
+ params->seq_len, params->seq);
+ return -EINVAL;
+ }
+
+ rc = wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
+ params->key, key_usage);
+ if ((rc == 0) && cc) {
+ if (params->seq)
+ memcpy(cc->pn, params->seq, IEEE80211_GCMP_PN_LEN);
+ else
+ memset(cc->pn, 0, IEEE80211_GCMP_PN_LEN);
+ cc->key_set = true;
+ }
- return wmi_add_cipher_key(wil, key_index, mac_addr, params->key_len,
- params->key, key_usage);
+ return rc;
}
static int wil_cfg80211_del_key(struct wiphy *wiphy,
@@ -661,9 +845,20 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
enum wmi_key_usage key_usage = wil_detect_key_usage(wil, pairwise);
+ struct wil_tid_crypto_rx_single *cc = wil_find_crypto_ctx(wil,
+ key_index,
+ key_usage,
+ mac_addr);
+
+ wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
+ key_usage_str[key_usage], key_index);
+
+ if (IS_ERR(cc))
+ wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
+ mac_addr, key_usage_str[key_usage], key_index);
- wil_dbg_misc(wil, "%s(%pM[%d] %s)\n", __func__, mac_addr, key_index,
- pairwise ? "PTK" : "GTK");
+ if (!IS_ERR_OR_NULL(cc))
+ cc->key_set = false;
return wmi_del_cipher_key(wil, key_index, mac_addr, key_usage);
}
@@ -674,6 +869,9 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
u8 key_index, bool unicast,
bool multicast)
{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "%s: entered\n", __func__);
return 0;
}
@@ -686,16 +884,19 @@ static int wil_remain_on_channel(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
- /* TODO: handle duration */
- wil_info(wil, "%s(%d, %d ms)\n", __func__, chan->center_freq, duration);
+ wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
+ __func__, chan->center_freq, duration, wdev->iftype);
- rc = wmi_set_channel(wil, chan->hw_value);
+ rc = wil_p2p_listen(wil, duration, chan, cookie);
if (rc)
return rc;
- rc = wmi_rxon(wil, true);
+ wil->radio_wdev = wdev;
- return rc;
+ cfg80211_ready_on_channel(wdev, *cookie, chan, duration,
+ GFP_KERNEL);
+
+ return 0;
}
static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -703,13 +904,10 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
u64 cookie)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- int rc;
-
- wil_info(wil, "%s()\n", __func__);
- rc = wmi_rxon(wil, false);
+ wil_dbg_misc(wil, "%s()\n", __func__);
- return rc;
+ return wil_p2p_cancel_listen(wil, cookie);
}
/**
@@ -852,12 +1050,22 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
const u8 *ssid, size_t ssid_len, u32 privacy,
int bi, u8 chan,
struct cfg80211_beacon_data *bcon,
- u8 hidden_ssid)
+ u8 hidden_ssid, u32 pbss)
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
struct wireless_dev *wdev = ndev->ieee80211_ptr;
u8 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);
+ u8 is_go = (wdev->iftype == NL80211_IFTYPE_P2P_GO);
+
+ if (pbss)
+ wmi_nettype = WMI_NETTYPE_P2P;
+
+ wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go);
+ if (is_go && !pbss) {
+ wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__);
+ return -ENOTSUPP;
+ }
wil_set_recovery_state(wil, fw_recovery_idle);
@@ -879,10 +1087,11 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
wil->privacy = privacy;
wil->channel = chan;
wil->hidden_ssid = hidden_ssid;
+ wil->pbss = pbss;
netif_carrier_on(ndev);
- rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid);
+ rc = wmi_pcp_start(wil, bi, wmi_nettype, chan, hidden_ssid, is_go);
if (rc)
goto err_pcp_start;
@@ -928,7 +1137,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
wdev->ssid_len, privacy,
wdev->beacon_interval,
wil->channel, bcon,
- wil->hidden_ssid);
+ wil->hidden_ssid,
+ wil->pbss);
} else {
rc = _wil_cfg80211_set_ies(wiphy, bcon);
}
@@ -954,11 +1164,6 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
return -EINVAL;
}
- if (info->pbss) {
- wil_err(wil, "AP: PBSS not yet supported\n");
- return -EOPNOTSUPP;
- }
-
switch (info->hidden_ssid) {
case NL80211_HIDDEN_SSID_NOT_IN_USE:
hidden_ssid = WMI_HIDDEN_SSID_DISABLED;
@@ -984,6 +1189,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
info->hidden_ssid);
wil_dbg_misc(wil, "BI %d DTIM %d\n", info->beacon_interval,
info->dtim_period);
+ wil_dbg_misc(wil, "PBSS %d\n", info->pbss);
print_hex_dump_bytes("SSID ", DUMP_PREFIX_OFFSET,
info->ssid, info->ssid_len);
wil_print_bcon_data(bcon);
@@ -992,7 +1198,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
rc = _wil_cfg80211_start_ap(wiphy, ndev,
info->ssid, info->ssid_len, info->privacy,
info->beacon_interval, channel->hw_value,
- bcon, hidden_ssid);
+ bcon, hidden_ssid, info->pbss);
return rc;
}
@@ -1139,7 +1345,26 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy,
return 0;
}
+static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "%s: entered\n", __func__);
+ return 0;
+}
+
+static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
+ struct wireless_dev *wdev)
+{
+ struct wil6210_priv *wil = wiphy_to_wil(wiphy);
+
+ wil_dbg_misc(wil, "%s: entered\n", __func__);
+}
+
static struct cfg80211_ops wil_cfg80211_ops = {
+ .add_virtual_intf = wil_cfg80211_add_iface,
+ .del_virtual_intf = wil_cfg80211_del_iface,
.scan = wil_cfg80211_scan,
.connect = wil_cfg80211_connect,
.disconnect = wil_cfg80211_disconnect,
@@ -1160,20 +1385,25 @@ static struct cfg80211_ops wil_cfg80211_ops = {
.del_station = wil_cfg80211_del_station,
.probe_client = wil_cfg80211_probe_client,
.change_bss = wil_cfg80211_change_bss,
+ /* P2P device */
+ .start_p2p_device = wil_cfg80211_start_p2p_device,
+ .stop_p2p_device = wil_cfg80211_stop_p2p_device,
};
static void wil_wiphy_init(struct wiphy *wiphy)
{
wiphy->max_scan_ssids = 1;
wiphy->max_scan_ie_len = WMI_MAX_IE_LEN;
+ wiphy->max_remain_on_channel_duration = WIL_MAX_ROC_DURATION_MS;
wiphy->max_num_pmkids = 0 /* TODO: */;
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_MONITOR);
- /* TODO: enable P2P when integrated with supplicant:
- * BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO)
- */
wiphy->flags |= WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
dev_dbg(wiphy_dev(wiphy), "%s : flags = 0x%08x\n",
__func__, wiphy->flags);
@@ -1182,7 +1412,7 @@ static void wil_wiphy_init(struct wiphy *wiphy)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
- wiphy->bands[IEEE80211_BAND_60GHZ] = &wil_band_60ghz;
+ wiphy->bands[NL80211_BAND_60GHZ] = &wil_band_60ghz;
/* TODO: figure this out */
wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
@@ -1241,3 +1471,18 @@ void wil_wdev_free(struct wil6210_priv *wil)
wiphy_free(wdev->wiphy);
kfree(wdev);
}
+
+void wil_p2p_wdev_free(struct wil6210_priv *wil)
+{
+ struct wireless_dev *p2p_wdev;
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+ p2p_wdev = wil->p2p_wdev;
+ if (p2p_wdev) {
+ wil->p2p_wdev = NULL;
+ wil->radio_wdev = wil_to_wdev(wil);
+ cfg80211_unregister_wdev(p2p_wdev);
+ kfree(p2p_wdev);
+ }
+ mutex_unlock(&wil->p2p_wdev_mutex);
+}
diff --git a/drivers/net/wireless/ath/wil6210/debug.c b/drivers/net/wireless/ath/wil6210/debug.c
index 3249562d9..c312a667c 100644
--- a/drivers/net/wireless/ath/wil6210/debug.c
+++ b/drivers/net/wireless/ath/wil6210/debug.c
@@ -17,7 +17,7 @@
#include "wil6210.h"
#include "trace.h"
-void wil_err(struct wil6210_priv *wil, const char *fmt, ...)
+void __wil_err(struct wil6210_priv *wil, const char *fmt, ...)
{
struct net_device *ndev = wil_to_ndev(wil);
struct va_format vaf = {
@@ -32,7 +32,7 @@ void wil_err(struct wil6210_priv *wil, const char *fmt, ...)
va_end(args);
}
-void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
+void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
{
if (net_ratelimit()) {
struct net_device *ndev = wil_to_ndev(wil);
@@ -49,7 +49,23 @@ void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...)
}
}
-void wil_info(struct wil6210_priv *wil, const char *fmt, ...)
+void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (!net_ratelimit())
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ netdev_dbg(wil_to_ndev(wil), "%pV", &vaf);
+ trace_wil6210_log_dbg(&vaf);
+ va_end(args);
+}
+
+void __wil_info(struct wil6210_priv *wil, const char *fmt, ...)
{
struct net_device *ndev = wil_to_ndev(wil);
struct va_format vaf = {
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 3bbe73b6d..a8098b406 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -37,6 +37,7 @@ enum dbg_off_type {
doff_x32 = 1,
doff_ulong = 2,
doff_io32 = 3,
+ doff_u8 = 4
};
/* offset to "wil" */
@@ -170,6 +171,8 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
int rsize;
uint i;
+ wil_halp_vote(wil);
+
wil_memcpy_fromio_32(&r, off, sizeof(r));
wil_mbox_ring_le2cpus(&r);
/*
@@ -235,6 +238,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
}
out:
seq_puts(s, "}\n");
+ wil_halp_unvote(wil);
}
static int wil_mbox_debugfs_show(struct seq_file *s, void *data)
@@ -346,6 +350,10 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
tbl[i].mode, dbg,
base + tbl[i].off);
break;
+ case doff_u8:
+ f = debugfs_create_u8(tbl[i].name, tbl[i].mode, dbg,
+ base + tbl[i].off);
+ break;
default:
f = ERR_PTR(-EINVAL);
}
@@ -495,9 +503,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
enum { max_count = 4096 };
- struct debugfs_blob_wrapper *blob = file->private_data;
+ struct wil_blob_wrapper *wil_blob = file->private_data;
loff_t pos = *ppos;
- size_t available = blob->size;
+ size_t available = wil_blob->blob.size;
void *buf;
size_t ret;
@@ -516,8 +524,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- wil_memcpy_fromio_32(buf, (const volatile void __iomem *)blob->data +
- pos, count);
+ wil_memcpy_fromio_halp_vote(wil_blob->wil, buf,
+ (const volatile void __iomem *)
+ wil_blob->blob.data + pos, count);
ret = copy_to_user(user_buf, buf, count);
kfree(buf);
@@ -540,9 +549,9 @@ static
struct dentry *wil_debugfs_create_ioblob(const char *name,
umode_t mode,
struct dentry *parent,
- struct debugfs_blob_wrapper *blob)
+ struct wil_blob_wrapper *wil_blob)
{
- return debugfs_create_file(name, mode, parent, blob, &fops_ioblob);
+ return debugfs_create_file(name, mode, parent, wil_blob, &fops_ioblob);
}
/*---reset---*/
@@ -821,13 +830,13 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
struct wil6210_priv *wil = file->private_data;
- struct wil6210_mbox_hdr_wmi *wmi;
+ struct wmi_cmd_hdr *wmi;
void *cmd;
- int cmdlen = len - sizeof(struct wil6210_mbox_hdr_wmi);
+ int cmdlen = len - sizeof(struct wmi_cmd_hdr);
u16 cmdid;
int rc, rc1;
- if (cmdlen <= 0)
+ if (cmdlen < 0)
return -EINVAL;
wmi = kmalloc(len, GFP_KERNEL);
@@ -840,8 +849,8 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
return rc;
}
- cmd = &wmi[1];
- cmdid = le16_to_cpu(wmi->id);
+ cmd = (cmdlen > 0) ? &wmi[1] : NULL;
+ cmdid = le16_to_cpu(wmi->command_id);
rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
kfree(wmi);
@@ -985,7 +994,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data)
.interval_usec = 0,
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_notify_req_done_event evt;
} __packed reply;
@@ -1333,6 +1342,34 @@ static void wil_print_rxtid(struct seq_file *s, struct wil_tid_ampdu_rx *r)
r->ssn_last_drop);
}
+static void wil_print_rxtid_crypto(struct seq_file *s, int tid,
+ struct wil_tid_crypto_rx *c)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
+
+ if (cc->key_set)
+ goto has_keys;
+ }
+ return;
+
+has_keys:
+ if (tid < WIL_STA_TID_NUM)
+ seq_printf(s, " [%2d] PN", tid);
+ else
+ seq_puts(s, " [GR] PN");
+
+ for (i = 0; i < 4; i++) {
+ struct wil_tid_crypto_rx_single *cc = &c->key_id[i];
+
+ seq_printf(s, " [%i%s]%6phN", i, cc->key_set ? "+" : "-",
+ cc->pn);
+ }
+ seq_puts(s, "\n");
+}
+
static int wil_sta_debugfs_show(struct seq_file *s, void *data)
__acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
{
@@ -1360,18 +1397,25 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
spin_lock_bh(&p->tid_rx_lock);
for (tid = 0; tid < WIL_STA_TID_NUM; tid++) {
struct wil_tid_ampdu_rx *r = p->tid_rx[tid];
+ struct wil_tid_crypto_rx *c =
+ &p->tid_crypto_rx[tid];
if (r) {
- seq_printf(s, "[%2d] ", tid);
+ seq_printf(s, " [%2d] ", tid);
wil_print_rxtid(s, r);
}
+
+ wil_print_rxtid_crypto(s, tid, c);
}
+ wil_print_rxtid_crypto(s, WIL_STA_TID_NUM,
+ &p->group_crypto_rx);
spin_unlock_bh(&p->tid_rx_lock);
seq_printf(s,
- "Rx invalid frame: non-data %lu, short %lu, large %lu\n",
+ "Rx invalid frame: non-data %lu, short %lu, large %lu, replay %lu\n",
p->stats.rx_non_data_frame,
p->stats.rx_short_frame,
- p->stats.rx_large_frame);
+ p->stats.rx_large_frame,
+ p->stats.rx_replay);
seq_puts(s, "Rx/MCS:");
for (mcs = 0; mcs < ARRAY_SIZE(p->stats.rx_per_mcs);
@@ -1397,6 +1441,118 @@ static const struct file_operations fops_sta = {
.llseek = seq_lseek,
};
+static ssize_t wil_read_file_led_cfg(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char buf[80];
+ int n;
+
+ n = snprintf(buf, sizeof(buf),
+ "led_id is set to %d, echo 1 to enable, 0 to disable\n",
+ led_id);
+
+ n = min_t(int, n, sizeof(buf));
+
+ return simple_read_from_buffer(user_buf, count, ppos,
+ buf, n);
+}
+
+static ssize_t wil_write_file_led_cfg(struct file *file,
+ const char __user *buf_,
+ size_t count, loff_t *ppos)
+{
+ struct wil6210_priv *wil = file->private_data;
+ int val;
+ int rc;
+
+ rc = kstrtoint_from_user(buf_, count, 0, &val);
+ if (rc) {
+ wil_err(wil, "Invalid argument\n");
+ return rc;
+ }
+
+ wil_info(wil, "%s led %d\n", val ? "Enabling" : "Disabling", led_id);
+ rc = wmi_led_cfg(wil, val);
+ if (rc) {
+ wil_info(wil, "%s led %d failed\n",
+ val ? "Enabling" : "Disabling", led_id);
+ return rc;
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_led_cfg = {
+ .read = wil_read_file_led_cfg,
+ .write = wil_write_file_led_cfg,
+ .open = simple_open,
+};
+
+/* led_blink_time, write:
+ * "<blink_on_slow> <blink_off_slow> <blink_on_med> <blink_off_med> <blink_on_fast> <blink_off_fast>"
+ */
+static ssize_t wil_write_led_blink_time(struct file *file,
+ const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ int rc;
+ char *kbuf = kmalloc(len + 1, GFP_KERNEL);
+
+ if (!kbuf)
+ return -ENOMEM;
+
+ rc = simple_write_to_buffer(kbuf, len, ppos, buf, len);
+ if (rc != len) {
+ kfree(kbuf);
+ return rc >= 0 ? -EIO : rc;
+ }
+
+ kbuf[len] = '\0';
+ rc = sscanf(kbuf, "%d %d %d %d %d %d",
+ &led_blink_time[WIL_LED_TIME_SLOW].on_ms,
+ &led_blink_time[WIL_LED_TIME_SLOW].off_ms,
+ &led_blink_time[WIL_LED_TIME_MED].on_ms,
+ &led_blink_time[WIL_LED_TIME_MED].off_ms,
+ &led_blink_time[WIL_LED_TIME_FAST].on_ms,
+ &led_blink_time[WIL_LED_TIME_FAST].off_ms);
+ kfree(kbuf);
+
+ if (rc < 0)
+ return rc;
+ if (rc < 6)
+ return -EINVAL;
+
+ return len;
+}
+
+static ssize_t wil_read_led_blink_time(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ static char text[400];
+
+ snprintf(text, sizeof(text),
+ "To set led blink on/off time variables write:\n"
+ "<blink_on_slow> <blink_off_slow> <blink_on_med> "
+ "<blink_off_med> <blink_on_fast> <blink_off_fast>\n"
+ "The current values are:\n"
+ "%d %d %d %d %d %d\n",
+ led_blink_time[WIL_LED_TIME_SLOW].on_ms,
+ led_blink_time[WIL_LED_TIME_SLOW].off_ms,
+ led_blink_time[WIL_LED_TIME_MED].on_ms,
+ led_blink_time[WIL_LED_TIME_MED].off_ms,
+ led_blink_time[WIL_LED_TIME_FAST].on_ms,
+ led_blink_time[WIL_LED_TIME_FAST].off_ms);
+
+ return simple_read_from_buffer(user_buf, count, ppos, text,
+ sizeof(text));
+}
+
+static const struct file_operations fops_led_blink_time = {
+ .read = wil_read_led_blink_time,
+ .write = wil_write_led_blink_time,
+ .open = simple_open,
+};
+
/*----------------*/
static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
struct dentry *dbg)
@@ -1405,16 +1561,18 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
char name[32];
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
- struct debugfs_blob_wrapper *blob = &wil->blobs[i];
+ struct wil_blob_wrapper *wil_blob = &wil->blobs[i];
+ struct debugfs_blob_wrapper *blob = &wil_blob->blob;
const struct fw_map *map = &fw_mapping[i];
if (!map->name)
continue;
+ wil_blob->wil = wil;
blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
blob->size = map->to - map->from;
snprintf(name, sizeof(name), "blob_%s", map->name);
- wil_debugfs_create_ioblob(name, S_IRUGO, dbg, blob);
+ wil_debugfs_create_ioblob(name, S_IRUGO, dbg, wil_blob);
}
}
@@ -1443,6 +1601,8 @@ static const struct {
{"link", S_IRUGO, &fops_link},
{"info", S_IRUGO, &fops_info},
{"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
+ {"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
+ {"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1487,6 +1647,7 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(hw_version, S_IRUGO, doff_x32),
WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
+ WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8),
{},
};
@@ -1504,6 +1665,7 @@ static const struct dbg_off dbg_statics[] = {
{"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
{"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
doff_u32},
+ {"led_polarity", S_IRUGO | S_IWUSR, (ulong)&led_polarity, doff_u8},
{},
};
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 4f2ffa5c6..011e7412d 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -35,15 +35,19 @@
*
*/
-#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
+#define WIL6210_IRQ_DISABLE (0xFFFFFFFFUL)
+#define WIL6210_IRQ_DISABLE_NO_HALP (0xF7FFFFFFUL)
#define WIL6210_IMC_RX (BIT_DMA_EP_RX_ICR_RX_DONE | \
BIT_DMA_EP_RX_ICR_RX_HTRSH)
+#define WIL6210_IMC_RX_NO_RX_HTRSH (WIL6210_IMC_RX & \
+ (~(BIT_DMA_EP_RX_ICR_RX_HTRSH)))
#define WIL6210_IMC_TX (BIT_DMA_EP_TX_ICR_TX_DONE | \
BIT_DMA_EP_TX_ICR_TX_DONE_N(0))
-#define WIL6210_IMC_MISC (ISR_MISC_FW_READY | \
- ISR_MISC_MBOX_EVT | \
- ISR_MISC_FW_ERROR)
-
+#define WIL6210_IMC_MISC_NO_HALP (ISR_MISC_FW_READY | \
+ ISR_MISC_MBOX_EVT | \
+ ISR_MISC_FW_ERROR)
+#define WIL6210_IMC_MISC (WIL6210_IMC_MISC_NO_HALP | \
+ BIT_DMA_EP_MISC_ICR_HALP)
#define WIL6210_IRQ_PSEUDO_MASK (u32)(~(BIT_DMA_PSEUDO_CAUSE_RX | \
BIT_DMA_PSEUDO_CAUSE_TX | \
BIT_DMA_PSEUDO_CAUSE_MISC))
@@ -51,6 +55,7 @@
#if defined(CONFIG_WIL6210_ISR_COR)
/* configure to Clear-On-Read mode */
#define WIL_ICR_ICC_VALUE (0xFFFFFFFFUL)
+#define WIL_ICR_ICC_MISC_VALUE (0xF7FFFFFFUL)
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
@@ -58,6 +63,7 @@ static inline void wil_icr_clear(u32 x, void __iomem *addr)
#else /* defined(CONFIG_WIL6210_ISR_COR) */
/* configure to Write-1-to-Clear mode */
#define WIL_ICR_ICC_VALUE (0UL)
+#define WIL_ICR_ICC_MISC_VALUE (0UL)
static inline void wil_icr_clear(u32 x, void __iomem *addr)
{
@@ -86,10 +92,21 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
WIL6210_IRQ_DISABLE);
}
-static void wil6210_mask_irq_misc(struct wil6210_priv *wil)
+static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
+ wil_dbg_irq(wil, "%s: mask_halp(%s)\n", __func__,
+ mask_halp ? "true" : "false");
+
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
- WIL6210_IRQ_DISABLE);
+ mask_halp ? WIL6210_IRQ_DISABLE : WIL6210_IRQ_DISABLE_NO_HALP);
+}
+
+static void wil6210_mask_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
+ BIT_DMA_EP_MISC_ICR_HALP);
}
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
@@ -109,14 +126,27 @@ void wil6210_unmask_irq_tx(struct wil6210_priv *wil)
void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
{
+ bool unmask_rx_htrsh = test_bit(wil_status_fwconnected, wil->status);
+
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, IMC),
- WIL6210_IMC_RX);
+ unmask_rx_htrsh ? WIL6210_IMC_RX : WIL6210_IMC_RX_NO_RX_HTRSH);
}
-static void wil6210_unmask_irq_misc(struct wil6210_priv *wil)
+static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
+ wil_dbg_irq(wil, "%s: unmask_halp(%s)\n", __func__,
+ unmask_halp ? "true" : "false");
+
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
- WIL6210_IMC_MISC);
+ unmask_halp ? WIL6210_IMC_MISC : WIL6210_IMC_MISC_NO_HALP);
+}
+
+static void wil6210_unmask_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_irq(wil, "%s()\n", __func__);
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
+ BIT_DMA_EP_MISC_ICR_HALP);
}
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
@@ -134,7 +164,7 @@ void wil_mask_irq(struct wil6210_priv *wil)
wil6210_mask_irq_tx(wil);
wil6210_mask_irq_rx(wil);
- wil6210_mask_irq_misc(wil);
+ wil6210_mask_irq_misc(wil, true);
wil6210_mask_irq_pseudo(wil);
}
@@ -147,12 +177,12 @@ void wil_unmask_irq(struct wil6210_priv *wil)
wil_w(wil, RGF_DMA_EP_TX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICC),
- WIL_ICR_ICC_VALUE);
+ WIL_ICR_ICC_MISC_VALUE);
wil6210_unmask_irq_pseudo(wil);
wil6210_unmask_irq_tx(wil);
wil6210_unmask_irq_rx(wil);
- wil6210_unmask_irq_misc(wil);
+ wil6210_unmask_irq_misc(wil, true);
}
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
@@ -228,11 +258,8 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
*/
if (likely(isr & (BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH))) {
- wil_dbg_irq(wil, "RX done\n");
-
- if (unlikely(isr & BIT_DMA_EP_RX_ICR_RX_HTRSH))
- wil_err_ratelimited(wil,
- "Received \"Rx buffer is in risk of overflow\" interrupt\n");
+ wil_dbg_irq(wil, "RX done / RX_HTRSH received, ISR (0x%x)\n",
+ isr);
isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH);
@@ -344,7 +371,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
return IRQ_NONE;
}
- wil6210_mask_irq_misc(wil);
+ wil6210_mask_irq_misc(wil, false);
if (isr & ISR_MISC_FW_ERROR) {
u32 fw_assert_code = wil_r(wil, RGF_FW_ASSERT_CODE);
@@ -372,12 +399,19 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
isr &= ~ISR_MISC_FW_READY;
}
+ if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
+ wil_dbg_irq(wil, "%s: HALP IRQ invoked\n", __func__);
+ wil6210_mask_halp(wil);
+ isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
+ complete(&wil->halp.comp);
+ }
+
wil->isr_misc = isr;
if (isr) {
return IRQ_WAKE_THREAD;
} else {
- wil6210_unmask_irq_misc(wil);
+ wil6210_unmask_irq_misc(wil, false);
return IRQ_HANDLED;
}
}
@@ -391,12 +425,14 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
wil_dbg_irq(wil, "Thread ISR MISC 0x%08x\n", isr);
if (isr & ISR_MISC_FW_ERROR) {
+ wil->recovery_state = fw_recovery_pending;
wil_fw_core_dump(wil);
wil_notify_fw_error(wil);
isr &= ~ISR_MISC_FW_ERROR;
- if (wil->platform_ops.notify_crash) {
+ if (wil->platform_ops.notify) {
wil_err(wil, "notify platform driver about FW crash");
- wil->platform_ops.notify_crash(wil->platform_handle);
+ wil->platform_ops.notify(wil->platform_handle,
+ WIL_PLATFORM_EVT_FW_CRASH);
} else {
wil_fw_error_recovery(wil);
}
@@ -412,7 +448,7 @@ static irqreturn_t wil6210_irq_misc_thread(int irq, void *cookie)
wil->isr_misc = 0;
- wil6210_unmask_irq_misc(wil);
+ wil6210_unmask_irq_misc(wil, false);
return IRQ_HANDLED;
}
@@ -554,6 +590,23 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
wmb(); /* make sure write completed */
}
+void wil6210_set_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
+ BIT_DMA_EP_MISC_ICR_HALP);
+}
+
+void wil6210_clear_halp(struct wil6210_priv *wil)
+{
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
+ BIT_DMA_EP_MISC_ICR_HALP);
+ wil6210_unmask_halp(wil);
+}
+
int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
{
int rc;
diff --git a/drivers/net/wireless/ath/wil6210/ioctl.c b/drivers/net/wireless/ath/wil6210/ioctl.c
index f7f948621..630380078 100644
--- a/drivers/net/wireless/ath/wil6210/ioctl.c
+++ b/drivers/net/wireless/ath/wil6210/ioctl.c
@@ -161,13 +161,20 @@ out_free:
int wil_ioctl(struct wil6210_priv *wil, void __user *data, int cmd)
{
+ int ret;
+
switch (cmd) {
case WIL_IOCTL_MEMIO:
- return wil_ioc_memio_dword(wil, data);
+ ret = wil_ioc_memio_dword(wil, data);
+ break;
case WIL_IOCTL_MEMIO_BLOCK:
- return wil_ioc_memio_block(wil, data);
+ ret = wil_ioc_memio_block(wil, data);
+ break;
default:
wil_dbg_ioctl(wil, "Unsupported IOCTL 0x%04x\n", cmd);
return -ENOIOCTLCMD;
}
+
+ wil_dbg_ioctl(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
+ return ret;
}
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 78ba6e04c..8e31d755b 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -23,10 +23,17 @@
#include "wmi.h"
#include "boot_loader.h"
+#define WAIT_FOR_HALP_VOTE_MS 100
+
bool debug_fw; /* = false; */
module_param(debug_fw, bool, S_IRUGO);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
+static bool oob_mode;
+module_param(oob_mode, bool, S_IRUGO);
+MODULE_PARM_DESC(oob_mode,
+ " enable out of the box (OOB) mode in FW, for diagnostics and certification");
+
bool no_fw_recovery;
module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
@@ -127,6 +134,14 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
*d++ = __raw_readl(s++);
}
+void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
+ const volatile void __iomem *src, size_t count)
+{
+ wil_halp_vote(wil);
+ wil_memcpy_fromio_32(dst, src, count);
+ wil_halp_unvote(wil);
+}
+
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count)
{
@@ -137,6 +152,15 @@ void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
__raw_writel(*s++, d++);
}
+void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
+ volatile void __iomem *dst,
+ const void *src, size_t count)
+{
+ wil_halp_vote(wil);
+ wil_memcpy_toio_32(dst, src, count);
+ wil_halp_unvote(wil);
+}
+
static void wil_disconnect_cid(struct wil6210_priv *wil, int cid,
u16 reason_code, bool from_event)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
@@ -149,7 +173,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
might_sleep();
wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
sta->status);
-
+ /* inform upper/lower layers */
if (sta->status != wil_sta_unused) {
if (!from_event)
wmi_disconnect_sta(wil, sta->addr, reason_code, true);
@@ -165,7 +189,7 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
}
sta->status = wil_sta_unused;
}
-
+ /* reorder buffers */
for (i = 0; i < WIL_STA_TID_NUM; i++) {
struct wil_tid_ampdu_rx *r;
@@ -177,13 +201,30 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
spin_unlock_bh(&sta->tid_rx_lock);
}
+ /* crypto context */
+ memset(sta->tid_crypto_rx, 0, sizeof(sta->tid_crypto_rx));
+ memset(&sta->group_crypto_rx, 0, sizeof(sta->group_crypto_rx));
+ /* release vrings */
for (i = 0; i < ARRAY_SIZE(wil->vring_tx); i++) {
if (wil->vring2cid_tid[i][0] == cid)
wil_vring_fini_tx(wil, i);
}
+ /* statistics */
memset(&sta->stats, 0, sizeof(sta->stats));
}
+static bool wil_ap_is_connected(struct wil6210_priv *wil)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
+ if (wil->sta[i].status == wil_sta_connected)
+ return true;
+ }
+
+ return false;
+}
+
static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event)
{
@@ -237,6 +278,11 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
}
clear_bit(wil_status_fwconnecting, wil->status);
break;
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ if (!wil_ap_is_connected(wil))
+ clear_bit(wil_status_fwconnected, wil->status);
+ break;
default:
break;
}
@@ -300,6 +346,11 @@ void wil_set_recovery_state(struct wil6210_priv *wil, int state)
wake_up_interruptible(&wil->wq);
}
+bool wil_is_recovery_blocked(struct wil6210_priv *wil)
+{
+ return no_fw_recovery && (wil->recovery_state == fw_recovery_pending);
+}
+
static void wil_fw_error_worker(struct work_struct *work)
{
struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
@@ -440,27 +491,26 @@ int wil_priv_init(struct wil6210_priv *wil)
mutex_init(&wil->mutex);
mutex_init(&wil->wmi_mutex);
- mutex_init(&wil->back_rx_mutex);
- mutex_init(&wil->back_tx_mutex);
mutex_init(&wil->probe_client_mutex);
+ mutex_init(&wil->p2p_wdev_mutex);
+ mutex_init(&wil->halp.lock);
init_completion(&wil->wmi_ready);
init_completion(&wil->wmi_call);
+ init_completion(&wil->halp.comp);
wil->bcast_vring = -1;
setup_timer(&wil->connect_timer, wil_connect_timer_fn, (ulong)wil);
setup_timer(&wil->scan_timer, wil_scan_timer_fn, (ulong)wil);
+ setup_timer(&wil->p2p.discovery_timer, wil_p2p_discovery_timer_fn,
+ (ulong)wil);
INIT_WORK(&wil->disconnect_worker, wil_disconnect_worker);
INIT_WORK(&wil->wmi_event_worker, wmi_event_worker);
INIT_WORK(&wil->fw_error_worker, wil_fw_error_worker);
- INIT_WORK(&wil->back_rx_worker, wil_back_rx_worker);
- INIT_WORK(&wil->back_tx_worker, wil_back_tx_worker);
INIT_WORK(&wil->probe_client_worker, wil_probe_client_worker);
INIT_LIST_HEAD(&wil->pending_wmi_ev);
- INIT_LIST_HEAD(&wil->back_rx_pending);
- INIT_LIST_HEAD(&wil->back_tx_pending);
INIT_LIST_HEAD(&wil->probe_client_pending);
spin_lock_init(&wil->wmi_ev_lock);
init_waitqueue_head(&wil->wq);
@@ -514,16 +564,14 @@ void wil_priv_deinit(struct wil6210_priv *wil)
wil_set_recovery_state(wil, fw_recovery_idle);
del_timer_sync(&wil->scan_timer);
+ del_timer_sync(&wil->p2p.discovery_timer);
cancel_work_sync(&wil->disconnect_worker);
cancel_work_sync(&wil->fw_error_worker);
+ cancel_work_sync(&wil->p2p.discovery_expired_work);
mutex_lock(&wil->mutex);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
mutex_unlock(&wil->mutex);
wmi_event_flush(wil);
- wil_back_rx_flush(wil);
- cancel_work_sync(&wil->back_rx_worker);
- wil_back_tx_flush(wil);
- cancel_work_sync(&wil->back_tx_worker);
wil_probe_client_flush(wil);
cancel_work_sync(&wil->probe_client_worker);
destroy_workqueue(wil->wq_service);
@@ -542,6 +590,15 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
wil_w(wil, RGF_USER_USER_CPU_0, 1);
}
+static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
+{
+ wil_info(wil, "%s: enable=%d\n", __func__, enable);
+ if (enable)
+ wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+ else
+ wil_c(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
+}
+
static int wil_target_reset(struct wil6210_priv *wil)
{
int delay = 0;
@@ -637,6 +694,7 @@ void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
static int wil_get_bl_info(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
+ struct wiphy *wiphy = wil_to_wiphy(wil);
union {
struct bl_dedicated_registers_v0 bl0;
struct bl_dedicated_registers_v1 bl1;
@@ -681,6 +739,7 @@ static int wil_get_bl_info(struct wil6210_priv *wil)
}
ether_addr_copy(ndev->perm_addr, mac);
+ ether_addr_copy(wiphy->perm_addr, mac);
if (!is_valid_ether_addr(ndev->dev_addr))
ether_addr_copy(ndev->dev_addr, mac);
@@ -767,12 +826,24 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->hw_version == HW_VER_UNKNOWN)
return -ENODEV;
+ if (wil->platform_ops.notify) {
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ WIL_PLATFORM_EVT_PRE_RESET);
+ if (rc)
+ wil_err(wil,
+ "%s: PRE_RESET platform notify failed, rc %d\n",
+ __func__, rc);
+ }
+
set_bit(wil_status_resetting, wil->status);
cancel_work_sync(&wil->disconnect_worker);
wil6210_disconnect(wil, NULL, WLAN_REASON_DEAUTH_LEAVING, false);
wil_bcast_fini(wil);
+ /* Disable device LED before reset */
+ wmi_led_cfg(wil, false);
+
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
bitmap_zero(wil->status, wil_status_last);
@@ -807,6 +878,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (rc)
return rc;
+ wil_set_oob_mode(wil, oob_mode);
if (load_fw) {
wil_info(wil, "Use firmware <%s> + board <%s>\n", WIL_FW_NAME,
WIL_FW2_NAME);
@@ -839,6 +911,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
wil->ap_isolate = 0;
reinit_completion(&wil->wmi_ready);
reinit_completion(&wil->wmi_call);
+ reinit_completion(&wil->halp.comp);
if (load_fw) {
wil_configure_interrupt_moderation(wil);
@@ -846,8 +919,27 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* we just started MAC, wait for FW ready */
rc = wil_wait_for_fw_ready(wil);
- if (rc == 0) /* check FW is responsive */
- rc = wmi_echo(wil);
+ if (rc)
+ return rc;
+
+ /* check FW is responsive */
+ rc = wmi_echo(wil);
+ if (rc) {
+ wil_err(wil, "%s: wmi_echo failed, rc %d\n",
+ __func__, rc);
+ return rc;
+ }
+
+ if (wil->platform_ops.notify) {
+ rc = wil->platform_ops.notify(wil->platform_handle,
+ WIL_PLATFORM_EVT_FW_RDY);
+ if (rc) {
+ wil_err(wil,
+ "%s: FW_RDY notify failed, rc %d\n",
+ __func__, rc);
+ rc = 0;
+ }
+ }
}
return rc;
@@ -954,6 +1046,8 @@ int __wil_down(struct wil6210_priv *wil)
}
wil_enable_irq(wil);
+ (void)wil_p2p_stop_discovery(wil);
+
if (wil->scan_request) {
wil_dbg_misc(wil, "Abort scan_request 0x%p\n",
wil->scan_request);
@@ -1008,3 +1102,51 @@ int wil_find_cid(struct wil6210_priv *wil, const u8 *mac)
return rc;
}
+
+void wil_halp_vote(struct wil6210_priv *wil)
+{
+ unsigned long rc;
+ unsigned long to_jiffies = msecs_to_jiffies(WAIT_FOR_HALP_VOTE_MS);
+
+ mutex_lock(&wil->halp.lock);
+
+ wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
+
+ if (++wil->halp.ref_cnt == 1) {
+ wil6210_set_halp(wil);
+ rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
+ if (!rc)
+ wil_err(wil, "%s: HALP vote timed out\n", __func__);
+ else
+ wil_dbg_misc(wil,
+ "%s: HALP vote completed after %d ms\n",
+ __func__,
+ jiffies_to_msecs(to_jiffies - rc));
+ }
+
+ wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
+
+ mutex_unlock(&wil->halp.lock);
+}
+
+void wil_halp_unvote(struct wil6210_priv *wil)
+{
+ WARN_ON(wil->halp.ref_cnt == 0);
+
+ mutex_lock(&wil->halp.lock);
+
+ wil_dbg_misc(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
+
+ if (--wil->halp.ref_cnt == 0) {
+ wil6210_clear_halp(wil);
+ wil_dbg_misc(wil, "%s: HALP unvote\n", __func__);
+ }
+
+ wil_dbg_misc(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil->halp.ref_cnt);
+
+ mutex_unlock(&wil->halp.lock);
+}
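The wil_halp_vote()/wil_halp_unvote() pair above is a mutex-protected reference count: only the 0 -> 1 transition asks the hardware to enter the low-latency state (and waits on a completion for the acknowledging interrupt), and only the 1 -> 0 transition releases it. A minimal userspace sketch of the same voting pattern, with pthreads standing in for the kernel mutex and every name a hypothetical stand-in (the completion handshake with the HALP interrupt is omitted):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for wil6210_set_halp() / wil6210_clear_halp() */
static void hw_arm_halp(void)    { puts("HALP armed"); }
static void hw_disarm_halp(void) { puts("HALP disarmed"); }

static pthread_mutex_t halp_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int halp_ref_cnt;

void halp_vote(void)
{
	pthread_mutex_lock(&halp_lock);
	if (++halp_ref_cnt == 1)	/* only the 0 -> 1 transition arms HW */
		hw_arm_halp();
	pthread_mutex_unlock(&halp_lock);
}

void halp_unvote(void)
{
	pthread_mutex_lock(&halp_lock);
	if (halp_ref_cnt && --halp_ref_cnt == 0)  /* only 1 -> 0 disarms */
		hw_disarm_halp();
	pthread_mutex_unlock(&halp_lock);
}

int main(void)
{
	halp_vote();	/* arms the hardware */
	halp_vote();	/* refcount only */
	halp_unvote();	/* refcount only */
	halp_unvote();	/* disarms the hardware */
	return 0;
}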
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index ecc3c1bda..098409753 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -60,11 +60,7 @@ static int wil_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- int ret = wil_ioctl(wil, ifr->ifr_data, cmd);
-
- wil_dbg_misc(wil, "ioctl(0x%04x) -> %d\n", cmd, ret);
-
- return ret;
+ return wil_ioctl(wil, ifr->ifr_data, cmd);
}
static const struct net_device_ops wil_netdev_ops = {
@@ -149,6 +145,7 @@ void *wil_if_alloc(struct device *dev)
wil = wdev_to_wil(wdev);
wil->wdev = wdev;
+ wil->radio_wdev = wdev;
wil_dbg_misc(wil, "%s()\n", __func__);
@@ -160,7 +157,7 @@ void *wil_if_alloc(struct device *dev)
wdev->iftype = NL80211_IFTYPE_STATION; /* TODO */
/* default monitor channel */
- ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
+ ch = wdev->wiphy->bands[NL80211_BAND_60GHZ]->channels;
cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, wil_dev_setup);
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
new file mode 100644
index 000000000..1c9153894
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "wil6210.h"
+#include "wmi.h"
+
+#define P2P_WILDCARD_SSID "DIRECT-"
+#define P2P_DMG_SOCIAL_CHANNEL 2
+#define P2P_SEARCH_DURATION_MS 500
+#define P2P_DEFAULT_BI 100
+
+bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request)
+{
+ return (request->n_channels == 1) &&
+ (request->channels[0]->hw_value == P2P_DMG_SOCIAL_CHANNEL);
+}
+
+void wil_p2p_discovery_timer_fn(ulong x)
+{
+ struct wil6210_priv *wil = (void *)x;
+
+ wil_dbg_misc(wil, "%s\n", __func__);
+
+ schedule_work(&wil->p2p.discovery_expired_work);
+}
+
+int wil_p2p_search(struct wil6210_priv *wil,
+ struct cfg80211_scan_request *request)
+{
+ int rc;
+ struct wil_p2p_info *p2p = &wil->p2p;
+
+ wil_dbg_misc(wil, "%s: channel %d\n",
+ __func__, P2P_DMG_SOCIAL_CHANNEL);
+
+ mutex_lock(&wil->mutex);
+
+ if (p2p->discovery_started) {
+ wil_err(wil, "%s: search failed. discovery already ongoing\n",
+ __func__);
+ rc = -EBUSY;
+ goto out;
+ }
+
+ rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
+ if (rc) {
+ wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+ goto out;
+ }
+
+ rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+ if (rc) {
+ wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+ goto out_stop;
+ }
+
+ /* Set application IE to probe request and probe response */
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ,
+ request->ie_len, request->ie);
+ if (rc) {
+ wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n",
+ __func__);
+ goto out_stop;
+ }
+
+ /* supplicant doesn't provide Probe Response IEs. As a workaround -
+ * re-use Probe Request IEs
+ */
+ rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
+ request->ie_len, request->ie);
+ if (rc) {
+ wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n",
+ __func__);
+ goto out_stop;
+ }
+
+ rc = wmi_start_search(wil);
+ if (rc) {
+ wil_err(wil, "%s: wmi_start_search failed\n", __func__);
+ goto out_stop;
+ }
+
+ p2p->discovery_started = 1;
+ INIT_WORK(&p2p->discovery_expired_work, wil_p2p_search_expired);
+ mod_timer(&p2p->discovery_timer,
+ jiffies + msecs_to_jiffies(P2P_SEARCH_DURATION_MS));
+
+out_stop:
+ if (rc)
+ wmi_stop_discovery(wil);
+
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
+int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
+ struct ieee80211_channel *chan, u64 *cookie)
+{
+ struct wil_p2p_info *p2p = &wil->p2p;
+ u8 channel = P2P_DMG_SOCIAL_CHANNEL;
+ int rc;
+
+ if (chan)
+ channel = chan->hw_value;
+
+ wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
+
+ mutex_lock(&wil->mutex);
+
+ if (p2p->discovery_started) {
+ wil_err(wil, "%s: discovery already ongoing\n", __func__);
+ rc = -EBUSY;
+ goto out;
+ }
+
+ rc = wmi_p2p_cfg(wil, channel, P2P_DEFAULT_BI);
+ if (rc) {
+ wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+ goto out;
+ }
+
+ rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
+ if (rc) {
+ wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+ goto out_stop;
+ }
+
+ rc = wmi_start_listen(wil);
+ if (rc) {
+ wil_err(wil, "%s: wmi_start_listen failed\n", __func__);
+ goto out_stop;
+ }
+
+ memcpy(&p2p->listen_chan, chan, sizeof(*chan));
+ *cookie = ++p2p->cookie;
+
+ p2p->discovery_started = 1;
+ INIT_WORK(&p2p->discovery_expired_work, wil_p2p_listen_expired);
+ mod_timer(&p2p->discovery_timer,
+ jiffies + msecs_to_jiffies(duration));
+
+out_stop:
+ if (rc)
+ wmi_stop_discovery(wil);
+
+out:
+ mutex_unlock(&wil->mutex);
+ return rc;
+}
+
+u8 wil_p2p_stop_discovery(struct wil6210_priv *wil)
+{
+ struct wil_p2p_info *p2p = &wil->p2p;
+ u8 started = p2p->discovery_started;
+
+ if (p2p->discovery_started) {
+ del_timer_sync(&p2p->discovery_timer);
+ p2p->discovery_started = 0;
+ wmi_stop_discovery(wil);
+ }
+
+ return started;
+}
+
+int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
+{
+ struct wil_p2p_info *p2p = &wil->p2p;
+ u8 started;
+
+ mutex_lock(&wil->mutex);
+
+ if (cookie != p2p->cookie) {
+ wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
+ __func__, p2p->cookie, cookie);
+ mutex_unlock(&wil->mutex);
+ return -ENOENT;
+ }
+
+ started = wil_p2p_stop_discovery(wil);
+
+ mutex_unlock(&wil->mutex);
+
+ if (!started) {
+ wil_err(wil, "%s: listen not started\n", __func__);
+ return -ENOENT;
+ }
+
+ mutex_lock(&wil->p2p_wdev_mutex);
+ cfg80211_remain_on_channel_expired(wil->radio_wdev,
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ return 0;
+}
+
+void wil_p2p_listen_expired(struct work_struct *work)
+{
+ struct wil_p2p_info *p2p = container_of(work,
+ struct wil_p2p_info, discovery_expired_work);
+ struct wil6210_priv *wil = container_of(p2p,
+ struct wil6210_priv, p2p);
+ u8 started;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->mutex);
+ started = wil_p2p_stop_discovery(wil);
+ mutex_unlock(&wil->mutex);
+
+ if (started) {
+ mutex_lock(&wil->p2p_wdev_mutex);
+ cfg80211_remain_on_channel_expired(wil->radio_wdev,
+ p2p->cookie,
+ &p2p->listen_chan,
+ GFP_KERNEL);
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ }
+}
+
+void wil_p2p_search_expired(struct work_struct *work)
+{
+ struct wil_p2p_info *p2p = container_of(work,
+ struct wil_p2p_info, discovery_expired_work);
+ struct wil6210_priv *wil = container_of(p2p,
+ struct wil6210_priv, p2p);
+ u8 started;
+
+ wil_dbg_misc(wil, "%s()\n", __func__);
+
+ mutex_lock(&wil->mutex);
+ started = wil_p2p_stop_discovery(wil);
+ mutex_unlock(&wil->mutex);
+
+ if (started) {
+ mutex_lock(&wil->p2p_wdev_mutex);
+ cfg80211_scan_done(wil->scan_request, 0);
+ wil->scan_request = NULL;
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
+ }
+}
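wil_p2p_search() in the new p2p.c uses the kernel's two-label goto unwind: a failure before wmi_p2p_cfg() succeeds exits through "out", while any later failure falls through "out_stop", which undoes the configuration only when rc is non-zero; the success path passes through the same label harmlessly. A compact sketch of that control flow, with hypothetical stand-in functions for the staged WMI calls:

#include <stdio.h>

/* Hypothetical stand-ins for the staged WMI calls in wil_p2p_search() */
static int cfg_discovery(void)   { return 0; }
static int set_ssid(void)        { return 0; }
static int start_search(void)    { return -5; /* pretend the last step fails */ }
static void stop_discovery(void) { puts("discovery rolled back"); }

static int p2p_search_demo(void)
{
	int rc;

	rc = cfg_discovery();
	if (rc)
		goto out;		/* nothing armed yet: plain exit */

	rc = set_ssid();
	if (rc)
		goto out_stop;

	rc = start_search();

out_stop:
	if (rc)				/* success falls through with rc == 0 */
		stop_discovery();
out:
	return rc;
}

int main(void)
{
	printf("rc = %d\n", p2p_search_demo());
	return 0;
}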
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index e36f2a0c8..aeb72c438 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -275,6 +275,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
if (wil->platform_ops.uninit)
wil->platform_ops.uninit(wil->platform_handle);
+ wil_p2p_wdev_free(wil);
wil_if_free(wil);
}
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 32031e7a1..19ed127d4 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -291,35 +291,15 @@ static u16 wil_agg_size(struct wil6210_priv *wil, u16 req_agg_wsize)
return min(max_agg_size, req_agg_wsize);
}
-/* Block Ack - Rx side (recipient */
+/* Block Ack - Rx side (recipient) */
int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl)
-{
- struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL);
-
- if (!req)
- return -ENOMEM;
-
- req->cidxtid = cidxtid;
- req->dialog_token = dialog_token;
- req->ba_param_set = le16_to_cpu(ba_param_set);
- req->ba_timeout = le16_to_cpu(ba_timeout);
- req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl);
-
- mutex_lock(&wil->back_rx_mutex);
- list_add_tail(&req->list, &wil->back_rx_pending);
- mutex_unlock(&wil->back_rx_mutex);
-
- queue_work(wil->wq_service, &wil->back_rx_worker);
-
- return 0;
-}
-
-static void wil_back_rx_handle(struct wil6210_priv *wil,
- struct wil_back_rx *req)
__acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
{
+ u16 param_set = le16_to_cpu(ba_param_set);
+ u16 agg_timeout = le16_to_cpu(ba_timeout);
+ u16 seq_ctrl = le16_to_cpu(ba_seq_ctrl);
struct wil_sta_info *sta;
u8 cid, tid;
u16 agg_wsize = 0;
@@ -328,34 +308,35 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
* bits 2..5: TID
* bits 6..15: buffer size
*/
- u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
- bool agg_amsdu = !!(req->ba_param_set & BIT(0));
- int ba_policy = req->ba_param_set & BIT(1);
- u16 agg_timeout = req->ba_timeout;
+ u16 req_agg_wsize = WIL_GET_BITS(param_set, 6, 15);
+ bool agg_amsdu = !!(param_set & BIT(0));
+ int ba_policy = param_set & BIT(1);
u16 status = WLAN_STATUS_SUCCESS;
- u16 ssn = req->ba_seq_ctrl >> 4;
+ u16 ssn = seq_ctrl >> 4;
struct wil_tid_ampdu_rx *r;
- int rc;
+ int rc = 0;
might_sleep();
- parse_cidxtid(req->cidxtid, &cid, &tid);
+ parse_cidxtid(cidxtid, &cid, &tid);
/* sanity checks */
if (cid >= WIL6210_MAX_CID) {
wil_err(wil, "BACK: invalid CID %d\n", cid);
- return;
+ rc = -EINVAL;
+ goto out;
}
sta = &wil->sta[cid];
if (sta->status != wil_sta_connected) {
wil_err(wil, "BACK: CID %d not connected\n", cid);
- return;
+ rc = -EINVAL;
+ goto out;
}
wil_dbg_wmi(wil,
"ADDBA request for CID %d %pM TID %d size %d timeout %d AMSDU%s policy %d token %d SSN 0x%03x\n",
- cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
- agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);
+ cid, sta->addr, tid, req_agg_wsize, agg_timeout,
+ agg_amsdu ? "+" : "-", !!ba_policy, dialog_token, ssn);
/* apply policies */
if (ba_policy) {
@@ -365,10 +346,13 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
if (status == WLAN_STATUS_SUCCESS)
agg_wsize = wil_agg_size(wil, req_agg_wsize);
- rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
+ rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
agg_amsdu, agg_wsize, agg_timeout);
- if (rc || (status != WLAN_STATUS_SUCCESS))
- return;
+ if (rc || (status != WLAN_STATUS_SUCCESS)) {
+ wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n",
+ __func__, rc, status);
+ goto out;
+ }
/* apply */
r = wil_tid_ampdu_rx_alloc(wil, agg_wsize, ssn);
@@ -376,143 +360,37 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
wil_tid_ampdu_rx_free(wil, sta->tid_rx[tid]);
sta->tid_rx[tid] = r;
spin_unlock_bh(&sta->tid_rx_lock);
-}
-
-void wil_back_rx_flush(struct wil6210_priv *wil)
-{
- struct wil_back_rx *evt, *t;
- wil_dbg_misc(wil, "%s()\n", __func__);
-
- mutex_lock(&wil->back_rx_mutex);
-
- list_for_each_entry_safe(evt, t, &wil->back_rx_pending, list) {
- list_del(&evt->list);
- kfree(evt);
- }
-
- mutex_unlock(&wil->back_rx_mutex);
-}
-
-/* Retrieve next ADDBA request from the pending list */
-static struct list_head *next_back_rx(struct wil6210_priv *wil)
-{
- struct list_head *ret = NULL;
-
- mutex_lock(&wil->back_rx_mutex);
-
- if (!list_empty(&wil->back_rx_pending)) {
- ret = wil->back_rx_pending.next;
- list_del(ret);
- }
-
- mutex_unlock(&wil->back_rx_mutex);
-
- return ret;
-}
-
-void wil_back_rx_worker(struct work_struct *work)
-{
- struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
- back_rx_worker);
- struct wil_back_rx *evt;
- struct list_head *lh;
-
- while ((lh = next_back_rx(wil)) != NULL) {
- evt = list_entry(lh, struct wil_back_rx, list);
-
- wil_back_rx_handle(wil, evt);
- kfree(evt);
- }
+out:
+ return rc;
}
-/* BACK - Tx (originator) side */
-static void wil_back_tx_handle(struct wil6210_priv *wil,
- struct wil_back_tx *req)
+/* BACK - Tx side (originator) */
+int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
{
- struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid];
- int rc;
+ u8 agg_wsize = wil_agg_size(wil, wsize);
+ u16 agg_timeout = 0;
+ struct vring_tx_data *txdata = &wil->vring_tx_data[ringid];
+ int rc = 0;
if (txdata->addba_in_progress) {
wil_dbg_misc(wil, "ADDBA for vring[%d] already in progress\n",
- req->ringid);
- return;
+ ringid);
+ goto out;
}
if (txdata->agg_wsize) {
wil_dbg_misc(wil,
- "ADDBA for vring[%d] already established wsize %d\n",
- req->ringid, txdata->agg_wsize);
- return;
+ "ADDBA for vring[%d] already done for wsize %d\n",
+ ringid, txdata->agg_wsize);
+ goto out;
}
txdata->addba_in_progress = true;
- rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout);
- if (rc)
+ rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout);
+ if (rc) {
+ wil_err(wil, "%s: wmi_addba failed, rc (%d)\n", __func__, rc);
txdata->addba_in_progress = false;
-}
-
-static struct list_head *next_back_tx(struct wil6210_priv *wil)
-{
- struct list_head *ret = NULL;
-
- mutex_lock(&wil->back_tx_mutex);
-
- if (!list_empty(&wil->back_tx_pending)) {
- ret = wil->back_tx_pending.next;
- list_del(ret);
- }
-
- mutex_unlock(&wil->back_tx_mutex);
-
- return ret;
-}
-
-void wil_back_tx_worker(struct work_struct *work)
-{
- struct wil6210_priv *wil = container_of(work, struct wil6210_priv,
- back_tx_worker);
- struct wil_back_tx *evt;
- struct list_head *lh;
-
- while ((lh = next_back_tx(wil)) != NULL) {
- evt = list_entry(lh, struct wil_back_tx, list);
-
- wil_back_tx_handle(wil, evt);
- kfree(evt);
}
-}
-
-void wil_back_tx_flush(struct wil6210_priv *wil)
-{
- struct wil_back_tx *evt, *t;
-
- wil_dbg_misc(wil, "%s()\n", __func__);
-
- mutex_lock(&wil->back_tx_mutex);
-
- list_for_each_entry_safe(evt, t, &wil->back_tx_pending, list) {
- list_del(&evt->list);
- kfree(evt);
- }
-
- mutex_unlock(&wil->back_tx_mutex);
-}
-
-int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
-{
- struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL);
-
- if (!req)
- return -ENOMEM;
- req->ringid = ringid;
- req->agg_wsize = wil_agg_size(wil, wsize);
- req->agg_timeout = 0;
-
- mutex_lock(&wil->back_tx_mutex);
- list_add_tail(&req->list, &wil->back_tx_pending);
- mutex_unlock(&wil->back_tx_mutex);
-
- queue_work(wil->wq_service, &wil->back_tx_worker);
-
- return 0;
+out:
+ return rc;
}
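The reworked ADDBA handling in rx_reorder.c unpacks the 802.11 BA-parameter-set word exactly as the in-code comment describes: bit 0 is A-MSDU support, bit 1 the BA policy, bits 2..5 the TID, and bits 6..15 the requested window size. A self-contained illustration of that extraction, using a macro with the same semantics as the driver's WIL_GET_BITS() (the sample value is made up):

#include <stdio.h>
#include <stdint.h>

/* Same idea as the driver's WIL_GET_BITS(): take bits b0..b1 (inclusive)
 * of x, shifted down to bit 0.
 */
#define GET_BITS(x, b0, b1) (((x) >> (b0)) & ((1U << ((b1) - (b0) + 1)) - 1))

int main(void)
{
	uint16_t param_set = 0x1005;	/* hypothetical BA-parameter-set word */
	unsigned amsdu  = GET_BITS(param_set, 0, 0);   /* bit 0: A-MSDU */
	unsigned policy = GET_BITS(param_set, 1, 1);   /* bit 1: BA policy */
	unsigned tid    = GET_BITS(param_set, 2, 5);   /* bits 2..5: TID */
	unsigned wsize  = GET_BITS(param_set, 6, 15);  /* bits 6..15: window */

	printf("amsdu=%u policy=%u tid=%u wsize=%u\n",
	       amsdu, policy, tid, wsize);
	return 0;
}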
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index e59239d22..c4db2a9d9 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2013-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -37,39 +37,40 @@ static inline void trace_ ## name(proto) {}
#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
DECLARE_EVENT_CLASS(wil6210_wmi,
- TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
TP_ARGS(wmi, buf, buf_len),
TP_STRUCT__entry(
__field(u8, mid)
- __field(u16, id)
- __field(u32, timestamp)
+ __field(u16, command_id)
+ __field(u32, fw_timestamp)
__field(u16, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
__entry->mid = wmi->mid;
- __entry->id = le16_to_cpu(wmi->id);
- __entry->timestamp = le32_to_cpu(wmi->timestamp);
+ __entry->command_id = le16_to_cpu(wmi->command_id);
+ __entry->fw_timestamp = le32_to_cpu(wmi->fw_timestamp);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
"MID %d id 0x%04x len %d timestamp %d",
- __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp
+ __entry->mid, __entry->command_id, __entry->buf_len,
+ __entry->fw_timestamp
)
);
DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
- TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
TP_ARGS(wmi, buf, buf_len)
);
DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
- TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_PROTO(struct wmi_cmd_hdr *wmi, void *buf, u16 buf_len),
TP_ARGS(wmi, buf, buf_len)
);
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index 6af20903c..a4e43796a 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -549,6 +549,60 @@ static int wil_rx_refill(struct wil6210_priv *wil, int count)
return rc;
}
+/**
+ * reverse_memcmp - Compare two areas of memory, in reverse order
+ * @cs: One area of memory
+ * @ct: Another area of memory
+ * @count: The size of the area.
+ *
+ * Cut'n'paste from original memcmp (see lib/string.c)
+ * with minimal modifications
+ */
+static int reverse_memcmp(const void *cs, const void *ct, size_t count)
+{
+ const unsigned char *su1, *su2;
+ int res = 0;
+
+ for (su1 = cs + count - 1, su2 = ct + count - 1; count > 0;
+ --su1, --su2, count--) {
+ res = *su1 - *su2;
+ if (res)
+ break;
+ }
+ return res;
+}
+
+static int wil_rx_crypto_check(struct wil6210_priv *wil, struct sk_buff *skb)
+{
+ struct vring_rx_desc *d = wil_skb_rxdesc(skb);
+ int cid = wil_rxdesc_cid(d);
+ int tid = wil_rxdesc_tid(d);
+ int key_id = wil_rxdesc_key_id(d);
+ int mc = wil_rxdesc_mcast(d);
+ struct wil_sta_info *s = &wil->sta[cid];
+ struct wil_tid_crypto_rx *c = mc ? &s->group_crypto_rx :
+ &s->tid_crypto_rx[tid];
+ struct wil_tid_crypto_rx_single *cc = &c->key_id[key_id];
+ const u8 *pn = (u8 *)&d->mac.pn_15_0;
+
+ if (!cc->key_set) {
+ wil_err_ratelimited(wil,
+ "Key missing. CID %d TID %d MCast %d KEY_ID %d\n",
+ cid, tid, mc, key_id);
+ return -EINVAL;
+ }
+
+ if (reverse_memcmp(pn, cc->pn, IEEE80211_GCMP_PN_LEN) <= 0) {
+ wil_err_ratelimited(wil,
+ "Replay attack. CID %d TID %d MCast %d KEY_ID %d PN %6phN last %6phN\n",
+ cid, tid, mc, key_id, pn, cc->pn);
+ return -EINVAL;
+ }
+ memcpy(cc->pn, pn, IEEE80211_GCMP_PN_LEN);
+
+ return 0;
+}
+
/*
* Pass Rx packet to the netif. Update statistics.
* Called in softirq context (NAPI poll).
@@ -561,6 +615,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
unsigned int len = skb->len;
struct vring_rx_desc *d = wil_skb_rxdesc(skb);
int cid = wil_rxdesc_cid(d); /* always 0..7, no need to check */
+ int security = wil_rxdesc_security(d);
struct ethhdr *eth = (void *)skb->data;
/* here looking for DA, not A1, thus Rxdesc's 'mcast' indication
* is not suitable, need to look at data
@@ -586,6 +641,13 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
skb_orphan(skb);
+ if (security && (wil_rx_crypto_check(wil, skb) != 0)) {
+ rc = GRO_DROP;
+ dev_kfree_skb(skb);
+ stats->rx_replay++;
+ goto stats;
+ }
+
if (wdev->iftype == NL80211_IFTYPE_AP && !wil->ap_isolate) {
if (mcast) {
/* send multicast frames both to higher layers in
@@ -627,6 +689,7 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
len, gro_res_str[rc]);
}
+stats:
/* statistics. rc set to GRO_NORMAL for AP bridging */
if (unlikely(rc == GRO_DROP)) {
ndev->stats.rx_dropped++;
@@ -757,7 +820,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
},
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_vring_cfg_done_event cmd;
} __packed reply;
struct vring *vring = &wil->vring_tx[id];
@@ -834,7 +897,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
},
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_vring_cfg_done_event cmd;
} __packed reply;
struct vring *vring = &wil->vring_tx[id];
@@ -1696,7 +1759,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
goto drop;
}
if (unlikely(!test_bit(wil_status_fwconnected, wil->status))) {
- wil_err_ratelimited(wil, "FW not connected\n");
+ wil_dbg_ratelimited(wil, "FW not connected, packet dropped\n");
goto drop;
}
if (unlikely(wil->wdev->iftype == NL80211_IFTYPE_MONITOR)) {
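The replay check added to txrx.c works because the 6-byte GCMP PN is stored least-significant byte first, so the magnitude comparison must start from the last byte — hence reverse_memcmp(). A frame is dropped unless its PN is strictly greater than the last accepted one, which is then advanced. A standalone sketch of the same check, with hypothetical PN values:

#include <stdio.h>
#include <string.h>

#define PN_LEN 6

/* Compare from the last byte (most significant in this layout) down,
 * mirroring the driver's reverse_memcmp().
 */
static int reverse_memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1 = (const unsigned char *)cs + count - 1;
	const unsigned char *su2 = (const unsigned char *)ct + count - 1;
	int res = 0;

	for (; count > 0; --su1, --su2, count--) {
		res = *su1 - *su2;
		if (res)
			break;
	}
	return res;
}

int main(void)
{
	/* hypothetical PNs, least-significant byte first */
	unsigned char last[PN_LEN] = { 0x10, 0x00, 0x00, 0x00, 0x00, 0x00 };
	unsigned char rx[PN_LEN]   = { 0x05, 0x01, 0x00, 0x00, 0x00, 0x00 };

	if (reverse_memcmp(rx, last, PN_LEN) <= 0) {
		puts("replay: drop frame");
	} else {
		memcpy(last, rx, PN_LEN);	/* accept and advance the PN */
		puts("fresh PN: accept frame");
	}
	return 0;
}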
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index ee7c7b4b9..fcdffaa82 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -480,6 +480,16 @@ static inline int wil_rxdesc_ext_subtype(struct vring_rx_desc *d)
return WIL_GET_BITS(d->mac.d0, 28, 31);
}
+static inline int wil_rxdesc_key_id(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 4, 5);
+}
+
+static inline int wil_rxdesc_security(struct vring_rx_desc *d)
+{
+ return WIL_GET_BITS(d->mac.d1, 7, 7);
+}
+
static inline int wil_rxdesc_ds_bits(struct vring_rx_desc *d)
{
return WIL_GET_BITS(d->mac.d1, 8, 9);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 5eb41e1f6..1abdd45c3 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -22,6 +22,7 @@
#include <net/cfg80211.h>
#include <linux/timex.h>
#include <linux/types.h>
+#include "wmi.h"
#include "wil_platform.h"
extern bool no_fw_recovery;
@@ -131,6 +132,7 @@ struct RGF_ICR {
/* registers - FW addresses */
#define RGF_USER_USAGE_1 (0x880004)
#define RGF_USER_USAGE_6 (0x880018)
+ #define BIT_USER_OOB_MODE BIT(31)
#define RGF_USER_HW_MACHINE_STATE (0x8801dc)
#define HW_MACHINE_BOOT_DONE (0x3fffffd)
#define RGF_USER_USER_CPU_0 (0x8801e0)
@@ -166,6 +168,7 @@ struct RGF_ICR {
#define RGF_DMA_EP_MISC_ICR (0x881bec) /* struct RGF_ICR */
#define BIT_DMA_EP_MISC_ICR_RX_HTRSH BIT(0)
#define BIT_DMA_EP_MISC_ICR_TX_NO_ACT BIT(1)
+ #define BIT_DMA_EP_MISC_ICR_HALP BIT(27)
#define BIT_DMA_EP_MISC_ICR_FW_INT(n) BIT(28+n) /* n = [0..3] */
/* Legacy interrupt moderation control (before Sparrow v2)*/
@@ -334,29 +337,11 @@ struct wil6210_mbox_hdr {
/* max. value for wil6210_mbox_hdr.len */
#define MAX_MBOXITEM_SIZE (240)
-/**
- * struct wil6210_mbox_hdr_wmi - WMI header
- *
- * @mid: MAC ID
- * 00 - default, created by FW
- * 01..0f - WiFi ports, driver to create
- * 10..fe - debug
- * ff - broadcast
- * @id: command/event ID
- * @timestamp: FW fills for events, free-running msec timer
- */
-struct wil6210_mbox_hdr_wmi {
- u8 mid;
- u8 reserved;
- __le16 id;
- __le32 timestamp;
-} __packed;
-
struct pending_wmi_event {
struct list_head list;
struct {
struct wil6210_mbox_hdr hdr;
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
u8 data[0];
} __packed event;
};
@@ -455,6 +440,29 @@ struct wil_tid_ampdu_rx {
bool first_time; /* is it 1-st time this buffer used? */
};
+/**
+ * struct wil_tid_crypto_rx_single - TID crypto information (Rx).
+ *
+ * @pn: GCMP PN for the session
+ * @key_set: valid key present
+ */
+struct wil_tid_crypto_rx_single {
+ u8 pn[IEEE80211_GCMP_PN_LEN];
+ bool key_set;
+};
+
+struct wil_tid_crypto_rx {
+ struct wil_tid_crypto_rx_single key_id[4];
+};
+
+struct wil_p2p_info {
+ struct ieee80211_channel listen_chan;
+ u8 discovery_started;
+ u64 cookie;
+ struct timer_list discovery_timer; /* listen/search duration */
+ struct work_struct discovery_expired_work; /* listen/search expire */
+};
+
enum wil_sta_status {
wil_sta_unused = 0,
wil_sta_conn_pending = 1,
@@ -474,6 +482,7 @@ struct wil_net_stats {
unsigned long rx_non_data_frame;
unsigned long rx_short_frame;
unsigned long rx_large_frame;
+ unsigned long rx_replay;
u16 last_mcs_rx;
u64 rx_per_mcs[WIL_MCS_MAX + 1];
};
@@ -495,6 +504,8 @@ struct wil_sta_info {
spinlock_t tid_rx_lock; /* guarding tid_rx array */
unsigned long tid_rx_timer_expired[BITS_TO_LONGS(WIL_STA_TID_NUM)];
unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
+ struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
+ struct wil_tid_crypto_rx group_crypto_rx;
};
enum {
@@ -507,24 +518,6 @@ enum {
hw_capability_last
};
-struct wil_back_rx {
- struct list_head list;
- /* request params, converted to CPU byte order - what we asked for */
- u8 cidxtid;
- u8 dialog_token;
- u16 ba_param_set;
- u16 ba_timeout;
- u16 ba_seq_ctrl;
-};
-
-struct wil_back_tx {
- struct list_head list;
- /* request params, converted to CPU byte order - what we asked for */
- u8 ringid;
- u8 agg_wsize;
- u16 agg_timeout;
-};
-
struct wil_probe_client_req {
struct list_head list;
u64 cookie;
@@ -542,6 +535,41 @@ struct pmc_ctx {
int descriptor_size;
};
+struct wil_halp {
+ struct mutex lock; /* protect halp ref_cnt */
+ unsigned int ref_cnt;
+ struct completion comp;
+};
+
+struct wil_blob_wrapper {
+ struct wil6210_priv *wil;
+ struct debugfs_blob_wrapper blob;
+};
+
+#define WIL_LED_MAX_ID (2)
+#define WIL_LED_INVALID_ID (0xF)
+#define WIL_LED_BLINK_ON_SLOW_MS (300)
+#define WIL_LED_BLINK_OFF_SLOW_MS (300)
+#define WIL_LED_BLINK_ON_MED_MS (200)
+#define WIL_LED_BLINK_OFF_MED_MS (200)
+#define WIL_LED_BLINK_ON_FAST_MS (100)
+#define WIL_LED_BLINK_OFF_FAST_MS (100)
+enum {
+ WIL_LED_TIME_SLOW = 0,
+ WIL_LED_TIME_MED,
+ WIL_LED_TIME_FAST,
+ WIL_LED_TIME_LAST,
+};
+
+struct blink_on_off_time {
+ u32 on_ms;
+ u32 off_ms;
+};
+
+extern struct blink_on_off_time led_blink_time[WIL_LED_TIME_LAST];
+extern u8 led_id;
+extern u8 led_polarity;
+
struct wil6210_priv {
struct pci_dev *pdev;
struct wireless_dev *wdev;
@@ -595,13 +623,6 @@ struct wil6210_priv {
spinlock_t wmi_ev_lock;
struct napi_struct napi_rx;
struct napi_struct napi_tx;
- /* BACK */
- struct list_head back_rx_pending;
- struct mutex back_rx_mutex; /* protect @back_rx_pending */
- struct work_struct back_rx_worker;
- struct list_head back_tx_pending;
- struct mutex back_tx_mutex; /* protect @back_tx_pending */
- struct work_struct back_tx_worker;
/* keep alive */
struct list_head probe_client_pending;
struct mutex probe_client_mutex; /* protect @probe_client_pending */
@@ -621,12 +642,26 @@ struct wil6210_priv {
atomic_t isr_count_rx, isr_count_tx;
/* debugfs */
struct dentry *debug;
- struct debugfs_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+ struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
+ u8 discovery_mode;
void *platform_handle;
struct wil_platform_ops platform_ops;
struct pmc_ctx pmc;
+
+ bool pbss;
+
+ struct wil_p2p_info p2p;
+
+ /* P2P_DEVICE vif */
+ struct wireless_dev *p2p_wdev;
+ struct mutex p2p_wdev_mutex; /* protect @p2p_wdev */
+ struct wireless_dev *radio_wdev;
+
+ /* High Access Latency Policy voting */
+ struct wil_halp halp;
+
};
#define wil_to_wiphy(i) (i->wdev->wiphy)
@@ -640,11 +675,13 @@ struct wil6210_priv {
__printf(2, 3)
void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
-void wil_err(struct wil6210_priv *wil, const char *fmt, ...);
+void __wil_err(struct wil6210_priv *wil, const char *fmt, ...);
+__printf(2, 3)
+void __wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
-void wil_err_ratelimited(struct wil6210_priv *wil, const char *fmt, ...);
+void __wil_info(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
-void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
+void wil_dbg_ratelimited(const struct wil6210_priv *wil, const char *fmt, ...);
#define wil_dbg(wil, fmt, arg...) do { \
netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \
wil_dbg_trace(wil, fmt, ##arg); \
@@ -655,6 +692,10 @@ void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
#define wil_dbg_wmi(wil, fmt, arg...) wil_dbg(wil, "DBG[ WMI]" fmt, ##arg)
#define wil_dbg_misc(wil, fmt, arg...) wil_dbg(wil, "DBG[MISC]" fmt, ##arg)
#define wil_dbg_pm(wil, fmt, arg...) wil_dbg(wil, "DBG[ PM ]" fmt, ##arg)
+#define wil_err(wil, fmt, arg...) __wil_err(wil, "%s: " fmt, __func__, ##arg)
+#define wil_info(wil, fmt, arg...) __wil_info(wil, "%s: " fmt, __func__, ##arg)
+#define wil_err_ratelimited(wil, fmt, arg...) \
+ __wil_err_ratelimited(wil, "%s: " fmt, __func__, ##arg)
/* target operations */
/* register read */
@@ -712,6 +753,12 @@ void wil_memcpy_fromio_32(void *dst, const volatile void __iomem *src,
size_t count);
void wil_memcpy_toio_32(volatile void __iomem *dst, const void *src,
size_t count);
+void wil_memcpy_fromio_halp_vote(struct wil6210_priv *wil, void *dst,
+ const volatile void __iomem *src,
+ size_t count);
+void wil_memcpy_toio_halp_vote(struct wil6210_priv *wil,
+ volatile void __iomem *dst,
+ const void *src, size_t count);
void *wil_if_alloc(struct device *dev);
void wil_if_free(struct wil6210_priv *wil);
@@ -722,6 +769,7 @@ void wil_priv_deinit(struct wil6210_priv *wil);
int wil_reset(struct wil6210_priv *wil, bool no_fw);
void wil_fw_error_recovery(struct wil6210_priv *wil);
void wil_set_recovery_state(struct wil6210_priv *wil, int state);
+bool wil_is_recovery_blocked(struct wil6210_priv *wil);
int wil_up(struct wil6210_priv *wil);
int __wil_up(struct wil6210_priv *wil);
int wil_down(struct wil6210_priv *wil);
@@ -752,7 +800,6 @@ int wmi_add_cipher_key(struct wil6210_priv *wil, u8 key_index,
int wmi_echo(struct wil6210_priv *wil);
int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie);
int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring);
-int wmi_p2p_cfg(struct wil6210_priv *wil, int channel);
int wmi_rxon(struct wil6210_priv *wil, bool on);
int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r);
int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
@@ -765,11 +812,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
int wil_addba_rx_request(struct wil6210_priv *wil, u8 cidxtid,
u8 dialog_token, __le16 ba_param_set,
__le16 ba_timeout, __le16 ba_seq_ctrl);
-void wil_back_rx_worker(struct work_struct *work);
-void wil_back_rx_flush(struct wil6210_priv *wil);
int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize);
-void wil_back_tx_worker(struct work_struct *work);
-void wil_back_tx_flush(struct wil6210_priv *wil);
void wil6210_clear_irq(struct wil6210_priv *wil);
int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi);
@@ -779,6 +822,25 @@ void wil_unmask_irq(struct wil6210_priv *wil);
void wil_configure_interrupt_moderation(struct wil6210_priv *wil);
void wil_disable_irq(struct wil6210_priv *wil);
void wil_enable_irq(struct wil6210_priv *wil);
+
+/* P2P */
+bool wil_p2p_is_social_scan(struct cfg80211_scan_request *request);
+void wil_p2p_discovery_timer_fn(ulong x);
+int wil_p2p_search(struct wil6210_priv *wil,
+ struct cfg80211_scan_request *request);
+int wil_p2p_listen(struct wil6210_priv *wil, unsigned int duration,
+ struct ieee80211_channel *chan, u64 *cookie);
+u8 wil_p2p_stop_discovery(struct wil6210_priv *wil);
+int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie);
+void wil_p2p_listen_expired(struct work_struct *work);
+void wil_p2p_search_expired(struct work_struct *work);
+
+/* WMI for P2P */
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi);
+int wmi_start_listen(struct wil6210_priv *wil);
+int wmi_start_search(struct wil6210_priv *wil);
+int wmi_stop_discovery(struct wil6210_priv *wil);
+
int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct cfg80211_mgmt_tx_params *params,
u64 *cookie);
@@ -790,11 +852,13 @@ int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
struct wireless_dev *wil_cfg80211_init(struct device *dev);
void wil_wdev_free(struct wil6210_priv *wil);
+void wil_p2p_wdev_free(struct wil6210_priv *wil);
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
- u8 chan, u8 hidden_ssid);
+ u8 chan, u8 hidden_ssid, u8 is_go);
int wmi_pcp_stop(struct wil6210_priv *wil);
+int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event);
void wil_probe_client_flush(struct wil6210_priv *wil);
@@ -832,4 +896,9 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime);
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size);
void wil_fw_core_dump(struct wil6210_priv *wil);
+void wil_halp_vote(struct wil6210_priv *wil);
+void wil_halp_unvote(struct wil6210_priv *wil);
+void wil6210_set_halp(struct wil6210_priv *wil);
+void wil6210_clear_halp(struct wil6210_priv *wil);
+
#endif /* __WIL6210_H__ */
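The wil6210.h hunk above turns wil_err()/wil_info()/wil_err_ratelimited() into macros that prepend the caller's __func__ before delegating to the double-underscore helpers, so call sites no longer need to pass "%s: ..." by hand. The trick in isolation — log_err() is a hypothetical name, and the sketch uses the same GNU ##__VA_ARGS__ extension the kernel relies on:

#include <stdio.h>

/* Same idea as the new wil_err()/wil_info() wrappers: the macro prepends
 * the calling function's name, so call sites drop the manual __func__.
 */
#define log_err(fmt, ...) \
	fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__)

static void check_station(void)
{
	log_err("CID %d not connected\n", 3);
	/* prints: "check_station: CID 3 not connected" */
}

int main(void)
{
	check_station();
	return 0;
}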
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index 9a949d910..33d4a34b3 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -19,6 +19,12 @@
struct device;
+enum wil_platform_event {
+ WIL_PLATFORM_EVT_FW_CRASH = 0,
+ WIL_PLATFORM_EVT_PRE_RESET = 1,
+ WIL_PLATFORM_EVT_FW_RDY = 2,
+};
+
/**
* struct wil_platform_ops - wil platform module calls from this
* driver to platform driver
@@ -28,7 +34,7 @@ struct wil_platform_ops {
int (*suspend)(void *handle);
int (*resume)(void *handle);
void (*uninit)(void *handle);
- int (*notify_crash)(void *handle);
+ int (*notify)(void *handle, enum wil_platform_event evt);
};
/**
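The wil_platform.h change generalizes the single-purpose notify_crash() hook into one notify(handle, event) callback keyed by the new wil_platform_event enum, which is why main.c can raise PRE_RESET and FW_RDY through the same entry point. A minimal sketch of that dispatch shape (demo names are hypothetical; the enum values come from the patch):

#include <stdio.h>

/* Mirrors the enum added above */
enum platform_event {
	EVT_FW_CRASH = 0,
	EVT_PRE_RESET = 1,
	EVT_FW_RDY = 2,
};

/* One generic hook replaces the old notify_crash-only callback */
struct platform_ops {
	int (*notify)(void *handle, enum platform_event evt);
};

static int demo_notify(void *handle, enum platform_event evt)
{
	(void)handle;
	printf("platform notified: event %d\n", evt);
	return 0;
}

int main(void)
{
	struct platform_ops ops = { .notify = demo_notify };

	/* driver side: guard the optional hook before calling it */
	if (ops.notify)
		ops.notify(NULL, EVT_PRE_RESET);
	return 0;
}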
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 493e721c4..b80c5d850 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -32,6 +32,11 @@ module_param(agg_wsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
" 0 - use default; < 0 - don't auto-establish");
+u8 led_id = WIL_LED_INVALID_ID;
+module_param(led_id, byte, S_IRUGO);
+MODULE_PARM_DESC(led_id,
+ " 60G device led enablement. Set the led ID (0-2) to enable");
+
/**
* WMI event receiving - theory of operations
*
@@ -94,6 +99,14 @@ const struct fw_map fw_mapping[] = {
*/
};
+struct blink_on_off_time led_blink_time[] = {
+ {WIL_LED_BLINK_ON_SLOW_MS, WIL_LED_BLINK_OFF_SLOW_MS},
+ {WIL_LED_BLINK_ON_MED_MS, WIL_LED_BLINK_OFF_MED_MS},
+ {WIL_LED_BLINK_ON_FAST_MS, WIL_LED_BLINK_OFF_FAST_MS},
+};
+
+u8 led_polarity = LED_POLARITY_LOW_ACTIVE;
+
/**
* return AHB address for given firmware/ucode internal (linker) address
* @x - internal address
@@ -176,7 +189,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
{
struct {
struct wil6210_mbox_hdr hdr;
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
} __packed cmd = {
.hdr = {
.type = WIL_MBOX_HDR_TYPE_WMI,
@@ -185,7 +198,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
},
.wmi = {
.mid = 0,
- .id = cpu_to_le16(cmdid),
+ .command_id = cpu_to_le16(cmdid),
},
};
struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
@@ -194,6 +207,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
void __iomem *dst;
void __iomem *head = wmi_addr(wil, r->head);
uint retry;
+ int rc = 0;
if (sizeof(cmd) + len > r->entry_size) {
wil_err(wil, "WMI size too large: %d bytes, max is %d\n",
@@ -212,6 +226,9 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_err(wil, "WMI head is garbage: 0x%08x\n", r->head);
return -EINVAL;
}
+
+ wil_halp_vote(wil);
+
/* read Tx head till it is not busy */
for (retry = 5; retry > 0; retry--) {
wil_memcpy_fromio_32(&d_head, head, sizeof(d_head));
@@ -221,7 +238,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
}
if (d_head.sync != 0) {
wil_err(wil, "WMI head busy\n");
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
/* next head */
next_head = r->base + ((r->head - r->base + sizeof(d_head)) % r->size);
@@ -230,7 +248,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
for (retry = 5; retry > 0; retry--) {
if (!test_bit(wil_status_fwready, wil->status)) {
wil_err(wil, "WMI: cannot send command while FW not ready\n");
- return -EAGAIN;
+ rc = -EAGAIN;
+ goto out;
}
r->tail = wil_r(wil, RGF_MBOX +
offsetof(struct wil6210_mbox_ctl, tx.tail));
@@ -240,13 +259,15 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
}
if (next_head == r->tail) {
wil_err(wil, "WMI ring full\n");
- return -EBUSY;
+ rc = -EBUSY;
+ goto out;
}
dst = wmi_buffer(wil, d_head.addr);
if (!dst) {
wil_err(wil, "invalid WMI buffer: 0x%08x\n",
le32_to_cpu(d_head.addr));
- return -EINVAL;
+ rc = -EAGAIN;
+ goto out;
}
cmd.hdr.seq = cpu_to_le16(++wil->wmi_seq);
/* set command */
@@ -269,7 +290,9 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
wil_w(wil, RGF_USER_USER_ICR + offsetof(struct RGF_ICR, ICS),
SW_INT_MBOX);
- return 0;
+out:
+ wil_halp_unvote(wil);
+ return rc;
}
int wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
@@ -333,7 +356,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
}
ch_no = data->info.channel + 1;
- freq = ieee80211_channel_to_frequency(ch_no, IEEE80211_BAND_60GHZ);
+ freq = ieee80211_channel_to_frequency(ch_no, NL80211_BAND_60GHZ);
channel = ieee80211_get_channel(wiphy, freq);
signal = data->info.sqi;
d_status = le16_to_cpu(data->info.status);
@@ -368,6 +391,8 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
wil_hex_dump_wmi("IE ", DUMP_PREFIX_OFFSET, 16, 1, ie_buf,
ie_len, true);
+ wil_dbg_wmi(wil, "Capability info : 0x%04x\n", cap);
+
bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
d_len, signal, GFP_KERNEL);
if (bss) {
@@ -378,8 +403,10 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
wil_err(wil, "cfg80211_inform_bss_frame() failed\n");
}
} else {
- cfg80211_rx_mgmt(wil->wdev, freq, signal,
+ mutex_lock(&wil->p2p_wdev_mutex);
+ cfg80211_rx_mgmt(wil->radio_wdev, freq, signal,
(void *)rx_mgmt_frame, d_len, 0);
+ mutex_unlock(&wil->p2p_wdev_mutex);
}
}
@@ -406,7 +433,10 @@ static void wmi_evt_scan_complete(struct wil6210_priv *wil, int id,
wil->scan_request, aborted);
del_timer_sync(&wil->scan_timer);
+ mutex_lock(&wil->p2p_wdev_mutex);
cfg80211_scan_done(wil->scan_request, aborted);
+ wil->radio_wdev = wil->wdev;
+ mutex_unlock(&wil->p2p_wdev_mutex);
wil->scan_request = NULL;
} else {
wil_err(wil, "SCAN_COMPLETE while not scanning\n");
@@ -487,6 +517,14 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
return;
}
del_timer_sync(&wil->connect_timer);
+ } else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
+ (wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
+ if (wil->sta[evt->cid].status != wil_sta_unused) {
+ wil_err(wil, "%s: AP: Invalid status %d for CID %d\n",
+ __func__, wil->sta[evt->cid].status, evt->cid);
+ mutex_unlock(&wil->mutex);
+ return;
+ }
}
/* FIXME FW can transmit only ucast frames to peer */
@@ -648,7 +686,7 @@ static void wmi_evt_vring_en(struct wil6210_priv *wil, int id, void *d, int len)
static void wmi_evt_ba_status(struct wil6210_priv *wil, int id, void *d,
int len)
{
- struct wmi_vring_ba_status_event *evt = d;
+ struct wmi_ba_status_event *evt = d;
struct vring_tx_data *txdata;
wil_dbg_wmi(wil, "BACK[%d] %s {%d} timeout %d AMSDU%s\n",
@@ -834,10 +872,10 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
offsetof(struct wil6210_mbox_ring_desc, sync), 0);
/* indicate */
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
- (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
- struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
- u16 id = le16_to_cpu(wmi->id);
- u32 tstamp = le32_to_cpu(wmi->timestamp);
+ (len >= sizeof(struct wmi_cmd_hdr))) {
+ struct wmi_cmd_hdr *wmi = &evt->event.wmi;
+ u16 id = le16_to_cpu(wmi->command_id);
+ u32 tstamp = le32_to_cpu(wmi->fw_timestamp);
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
if (wil->reply_id && wil->reply_id == id) {
if (wil->reply_buf) {
@@ -946,8 +984,62 @@ int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
return wmi_send(wil, WMI_SET_MAC_ADDRESS_CMDID, &cmd, sizeof(cmd));
}
+int wmi_led_cfg(struct wil6210_priv *wil, bool enable)
+{
+ int rc = 0;
+ struct wmi_led_cfg_cmd cmd = {
+ .led_mode = enable,
+ .id = led_id,
+ .slow_blink_cfg.blink_on =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].on_ms),
+ .slow_blink_cfg.blink_off =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_SLOW].off_ms),
+ .medium_blink_cfg.blink_on =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].on_ms),
+ .medium_blink_cfg.blink_off =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_MED].off_ms),
+ .fast_blink_cfg.blink_on =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].on_ms),
+ .fast_blink_cfg.blink_off =
+ cpu_to_le32(led_blink_time[WIL_LED_TIME_FAST].off_ms),
+ .led_polarity = led_polarity,
+ };
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_led_cfg_done_event evt;
+ } __packed reply;
+
+ if (led_id == WIL_LED_INVALID_ID)
+ goto out;
+
+ if (led_id > WIL_LED_MAX_ID) {
+ wil_err(wil, "Invalid led id %d\n", led_id);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ wil_dbg_wmi(wil,
+ "%s led %d\n",
+ enable ? "enabling" : "disabling", led_id);
+
+ rc = wmi_call(wil, WMI_LED_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_LED_CFG_DONE_EVENTID, &reply, sizeof(reply),
+ 100);
+ if (rc)
+ goto out;
+
+ if (reply.evt.status) {
+ wil_err(wil, "led %d cfg failed with status %d\n",
+ led_id, le32_to_cpu(reply.evt.status));
+ rc = -EINVAL;
+ }
+
+out:
+ return rc;
+}
+
int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
- u8 chan, u8 hidden_ssid)
+ u8 chan, u8 hidden_ssid, u8 is_go)
{
int rc;
@@ -958,9 +1050,10 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
.channel = chan - 1,
.pcp_max_assoc_sta = max_assoc_sta,
.hidden_ssid = hidden_ssid,
+ .is_go = is_go,
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_pcp_started_event evt;
} __packed reply;
@@ -987,11 +1080,21 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
if (reply.evt.status != WMI_FW_STATUS_SUCCESS)
rc = -EINVAL;
+ if (wmi_nettype != WMI_NETTYPE_P2P)
+ /* Don't fail due to error in the led configuration */
+ wmi_led_cfg(wil, true);
+
return rc;
}
int wmi_pcp_stop(struct wil6210_priv *wil)
{
+ int rc;
+
+ rc = wmi_led_cfg(wil, false);
+ if (rc)
+ return rc;
+
return wmi_call(wil, WMI_PCP_STOP_CMDID, NULL, 0,
WMI_PCP_STOPPED_EVENTID, NULL, 0, 20);
}
@@ -1014,7 +1117,7 @@ int wmi_get_ssid(struct wil6210_priv *wil, u8 *ssid_len, void *ssid)
{
int rc;
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_set_ssid_cmd cmd;
} __packed reply;
int len; /* reply.cmd.ssid_len in CPU order */
@@ -1047,7 +1150,7 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
{
int rc;
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_set_pcp_channel_cmd cmd;
} __packed reply;
@@ -1064,14 +1167,86 @@ int wmi_get_channel(struct wil6210_priv *wil, int *channel)
return 0;
}
-int wmi_p2p_cfg(struct wil6210_priv *wil, int channel)
+int wmi_p2p_cfg(struct wil6210_priv *wil, int channel, int bi)
{
+ int rc;
struct wmi_p2p_cfg_cmd cmd = {
- .discovery_mode = WMI_DISCOVERY_MODE_NON_OFFLOAD,
+ .discovery_mode = WMI_DISCOVERY_MODE_PEER2PEER,
+ .bcon_interval = cpu_to_le16(bi),
.channel = channel - 1,
};
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_p2p_cfg_done_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil, "sending WMI_P2P_CFG_CMDID\n");
+
+ rc = wmi_call(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd),
+ WMI_P2P_CFG_DONE_EVENTID, &reply, sizeof(reply), 300);
+ if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "P2P_CFG failed. status %d\n", reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_start_listen(struct wil6210_priv *wil)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_listen_started_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil, "sending WMI_START_LISTEN_CMDID\n");
+
+ rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
+ WMI_LISTEN_STARTED_EVENTID, &reply, sizeof(reply), 300);
+ if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "device failed to start listen. status %d\n",
+ reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_start_search(struct wil6210_priv *wil)
+{
+ int rc;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_search_started_event evt;
+ } __packed reply;
+
+ wil_dbg_wmi(wil, "sending WMI_START_SEARCH_CMDID\n");
+
+ rc = wmi_call(wil, WMI_START_SEARCH_CMDID, NULL, 0,
+ WMI_SEARCH_STARTED_EVENTID, &reply, sizeof(reply), 300);
+ if (!rc && reply.evt.status != WMI_FW_STATUS_SUCCESS) {
+ wil_err(wil, "device failed to start search. status %d\n",
+ reply.evt.status);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+int wmi_stop_discovery(struct wil6210_priv *wil)
+{
+ int rc;
+
+ wil_dbg_wmi(wil, "sending WMI_DISCOVERY_STOP_CMDID\n");
+
+ rc = wmi_call(wil, WMI_DISCOVERY_STOP_CMDID, NULL, 0,
+ WMI_DISCOVERY_STOPPED_EVENTID, NULL, 0, 100);
- return wmi_send(wil, WMI_P2P_CFG_CMDID, &cmd, sizeof(cmd));
+ if (rc)
+ wil_err(wil, "Failed to stop discovery\n");
+
+ return rc;
}
int wmi_del_cipher_key(struct wil6210_priv *wil, u8 key_index,
@@ -1155,7 +1330,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
{
int rc;
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_listen_started_event evt;
} __packed reply;
@@ -1192,7 +1367,7 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
.host_thrsh = cpu_to_le16(rx_ring_overflow_thrsh),
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_cfg_rx_chain_done_event evt;
} __packed evt;
int rc;
@@ -1246,7 +1421,7 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf)
.measure_mode = cpu_to_le32(TEMPERATURE_MEASURE_NOW),
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_temp_sense_done_event evt;
} __packed reply;
@@ -1272,7 +1447,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac, u16 reason,
.disconnect_reason = cpu_to_le16(reason),
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_disconnect_event evt;
} __packed reply;
@@ -1364,7 +1539,7 @@ int wmi_addba_rx_resp(struct wil6210_priv *wil, u8 cid, u8 tid, u8 token,
.ba_timeout = cpu_to_le16(timeout),
};
struct {
- struct wil6210_mbox_hdr_wmi wmi;
+ struct wmi_cmd_hdr wmi;
struct wmi_rcp_addba_resp_sent_event evt;
} __packed reply;
@@ -1420,10 +1595,10 @@ static void wmi_event_handle(struct wil6210_priv *wil,
u16 len = le16_to_cpu(hdr->len);
if ((hdr->type == WIL_MBOX_HDR_TYPE_WMI) &&
- (len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
- struct wil6210_mbox_hdr_wmi *wmi = (void *)(&hdr[1]);
+ (len >= sizeof(struct wmi_cmd_hdr))) {
+ struct wmi_cmd_hdr *wmi = (void *)(&hdr[1]);
void *evt_data = (void *)(&wmi[1]);
- u16 id = le16_to_cpu(wmi->id);
+ u16 id = le16_to_cpu(wmi->command_id);
wil_dbg_wmi(wil, "Handle WMI 0x%04x (reply_id 0x%04x)\n",
id, wil->reply_id);
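Throughout wmi.c the driver declares its synchronous reply buffers as a packed struct that stacks the renamed wmi_cmd_hdr in front of the event payload, so a single wmi_call() fills header and event in one copy. A small standalone illustration of that layout — demo_done_event is a hypothetical stand-in for events such as wmi_led_cfg_done_event:

#include <stdio.h>
#include <stdint.h>

/* Shape of the renamed WMI header (fields as defined in wmi.h below) */
struct wmi_cmd_hdr {
	uint8_t  mid;
	uint8_t  reserved;
	uint16_t command_id;	/* little-endian on the wire */
	uint32_t fw_timestamp;
} __attribute__((packed));

/* Hypothetical event payload */
struct demo_done_event {
	uint32_t status;
} __attribute__((packed));

int main(void)
{
	/* the driver stacks header + event so one read fills both */
	struct {
		struct wmi_cmd_hdr wmi;
		struct demo_done_event evt;
	} __attribute__((packed)) reply;

	printf("reply buffer: %zu bytes (hdr %zu + evt %zu)\n",
	       sizeof(reply), sizeof(reply.wmi), sizeof(reply.evt));
	return 0;
}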
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 6e90e78f1..685fe0dde 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
- * Copyright (c) 2006-2012 Wilocity .
+ * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2006-2012 Wilocity
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -17,187 +17,198 @@
/*
* This file contains the definitions of the WMI protocol specified in the
- * Wireless Module Interface (WMI) for the Wilocity
- * MARLON 60 Gigabit wireless solution.
+ * Wireless Module Interface (WMI) for the Qualcomm
+ * 60 GHz wireless solution.
* It includes definitions of all the commands and events.
* Commands are messages from the host to the WM.
* Events are messages from the WM to the host.
+ *
+ * This is an automatically generated file.
*/
#ifndef __WILOCITY_WMI_H__
#define __WILOCITY_WMI_H__
/* General */
-#define WILOCITY_MAX_ASSOC_STA (8)
-#define WILOCITY_DEFAULT_ASSOC_STA (1)
-#define WMI_MAC_LEN (6)
-#define WMI_PROX_RANGE_NUM (3)
-#define WMI_MAX_LOSS_DMG_BEACONS (32)
+#define WMI_MAX_ASSOC_STA (8)
+#define WMI_DEFAULT_ASSOC_STA (1)
+#define WMI_MAC_LEN (6)
+#define WMI_PROX_RANGE_NUM (3)
+#define WMI_MAX_LOSS_DMG_BEACONS (20)
+
+/* Mailbox interface
+ * used for commands and events
+ */
+enum wmi_mid {
+ MID_DEFAULT = 0x00,
+ FIRST_DBG_MID_ID = 0x10,
+ LAST_DBG_MID_ID = 0xFE,
+ MID_BROADCAST = 0xFF,
+};
+
+/* WMI_CMD_HDR */
+struct wmi_cmd_hdr {
+ u8 mid;
+ u8 reserved;
+ __le16 command_id;
+ __le32 fw_timestamp;
+} __packed;
/* List of Commands */
enum wmi_command_id {
- WMI_CONNECT_CMDID = 0x0001,
- WMI_DISCONNECT_CMDID = 0x0003,
- WMI_DISCONNECT_STA_CMDID = 0x0004,
- WMI_START_SCAN_CMDID = 0x0007,
- WMI_SET_BSS_FILTER_CMDID = 0x0009,
- WMI_SET_PROBED_SSID_CMDID = 0x000a,
- WMI_SET_LISTEN_INT_CMDID = 0x000b,
- WMI_BCON_CTRL_CMDID = 0x000f,
- WMI_ADD_CIPHER_KEY_CMDID = 0x0016,
- WMI_DELETE_CIPHER_KEY_CMDID = 0x0017,
- WMI_SET_APPIE_CMDID = 0x003f,
- WMI_SET_WSC_STATUS_CMDID = 0x0041,
- WMI_PXMT_RANGE_CFG_CMDID = 0x0042,
- WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x0043,
-/* WMI_FAST_MEM_ACC_MODE_CMDID = 0x0300, */
- WMI_MEM_READ_CMDID = 0x0800,
- WMI_MEM_WR_CMDID = 0x0801,
- WMI_ECHO_CMDID = 0x0803,
- WMI_DEEP_ECHO_CMDID = 0x0804,
- WMI_CONFIG_MAC_CMDID = 0x0805,
- WMI_CONFIG_PHY_DEBUG_CMDID = 0x0806,
- WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x0808,
- WMI_PHY_GET_STATISTICS_CMDID = 0x0809,
- WMI_FS_TUNE_CMDID = 0x080a,
- WMI_CORR_MEASURE_CMDID = 0x080b,
- WMI_READ_RSSI_CMDID = 0x080c,
- WMI_TEMP_SENSE_CMDID = 0x080e,
- WMI_DC_CALIB_CMDID = 0x080f,
- WMI_SEND_TONE_CMDID = 0x0810,
- WMI_IQ_TX_CALIB_CMDID = 0x0811,
- WMI_IQ_RX_CALIB_CMDID = 0x0812,
- WMI_SET_UCODE_IDLE_CMDID = 0x0813,
- WMI_SET_WORK_MODE_CMDID = 0x0815,
- WMI_LO_LEAKAGE_CALIB_CMDID = 0x0816,
- WMI_MARLON_R_READ_CMDID = 0x0818,
- WMI_MARLON_R_WRITE_CMDID = 0x0819,
- WMI_MARLON_R_TXRX_SEL_CMDID = 0x081a,
- MAC_IO_STATIC_PARAMS_CMDID = 0x081b,
- MAC_IO_DYNAMIC_PARAMS_CMDID = 0x081c,
- WMI_SILENT_RSSI_CALIB_CMDID = 0x081d,
- WMI_RF_RX_TEST_CMDID = 0x081e,
- WMI_CFG_RX_CHAIN_CMDID = 0x0820,
- WMI_VRING_CFG_CMDID = 0x0821,
- WMI_BCAST_VRING_CFG_CMDID = 0x0822,
- WMI_VRING_BA_EN_CMDID = 0x0823,
- WMI_VRING_BA_DIS_CMDID = 0x0824,
- WMI_RCP_ADDBA_RESP_CMDID = 0x0825,
- WMI_RCP_DELBA_CMDID = 0x0826,
- WMI_SET_SSID_CMDID = 0x0827,
- WMI_GET_SSID_CMDID = 0x0828,
- WMI_SET_PCP_CHANNEL_CMDID = 0x0829,
- WMI_GET_PCP_CHANNEL_CMDID = 0x082a,
- WMI_SW_TX_REQ_CMDID = 0x082b,
- WMI_READ_MAC_RXQ_CMDID = 0x0830,
- WMI_READ_MAC_TXQ_CMDID = 0x0831,
- WMI_WRITE_MAC_RXQ_CMDID = 0x0832,
- WMI_WRITE_MAC_TXQ_CMDID = 0x0833,
- WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x0834,
- WMI_MLME_PUSH_CMDID = 0x0835,
- WMI_BEAMFORMING_MGMT_CMDID = 0x0836,
- WMI_BF_TXSS_MGMT_CMDID = 0x0837,
- WMI_BF_SM_MGMT_CMDID = 0x0838,
- WMI_BF_RXSS_MGMT_CMDID = 0x0839,
- WMI_BF_TRIG_CMDID = 0x083A,
- WMI_SET_SECTORS_CMDID = 0x0849,
- WMI_MAINTAIN_PAUSE_CMDID = 0x0850,
- WMI_MAINTAIN_RESUME_CMDID = 0x0851,
- WMI_RS_MGMT_CMDID = 0x0852,
- WMI_RF_MGMT_CMDID = 0x0853,
- WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x0854,
- WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x0855,
+ WMI_CONNECT_CMDID = 0x01,
+ WMI_DISCONNECT_CMDID = 0x03,
+ WMI_DISCONNECT_STA_CMDID = 0x04,
+ WMI_START_SCAN_CMDID = 0x07,
+ WMI_SET_BSS_FILTER_CMDID = 0x09,
+ WMI_SET_PROBED_SSID_CMDID = 0x0A,
+ WMI_SET_LISTEN_INT_CMDID = 0x0B,
+ WMI_BCON_CTRL_CMDID = 0x0F,
+ WMI_ADD_CIPHER_KEY_CMDID = 0x16,
+ WMI_DELETE_CIPHER_KEY_CMDID = 0x17,
+ WMI_PCP_CONF_CMDID = 0x18,
+ WMI_SET_APPIE_CMDID = 0x3F,
+ WMI_SET_WSC_STATUS_CMDID = 0x41,
+ WMI_PXMT_RANGE_CFG_CMDID = 0x42,
+ WMI_PXMT_SNR2_RANGE_CFG_CMDID = 0x43,
+ WMI_MEM_READ_CMDID = 0x800,
+ WMI_MEM_WR_CMDID = 0x801,
+ WMI_ECHO_CMDID = 0x803,
+ WMI_DEEP_ECHO_CMDID = 0x804,
+ WMI_CONFIG_MAC_CMDID = 0x805,
+ WMI_CONFIG_PHY_DEBUG_CMDID = 0x806,
+ WMI_ADD_DEBUG_TX_PCKT_CMDID = 0x808,
+ WMI_PHY_GET_STATISTICS_CMDID = 0x809,
+ WMI_FS_TUNE_CMDID = 0x80A,
+ WMI_CORR_MEASURE_CMDID = 0x80B,
+ WMI_READ_RSSI_CMDID = 0x80C,
+ WMI_TEMP_SENSE_CMDID = 0x80E,
+ WMI_DC_CALIB_CMDID = 0x80F,
+ WMI_SEND_TONE_CMDID = 0x810,
+ WMI_IQ_TX_CALIB_CMDID = 0x811,
+ WMI_IQ_RX_CALIB_CMDID = 0x812,
+ WMI_SET_UCODE_IDLE_CMDID = 0x813,
+ WMI_SET_WORK_MODE_CMDID = 0x815,
+ WMI_LO_LEAKAGE_CALIB_CMDID = 0x816,
+ WMI_MARLON_R_READ_CMDID = 0x818,
+ WMI_MARLON_R_WRITE_CMDID = 0x819,
+ WMI_MARLON_R_TXRX_SEL_CMDID = 0x81A,
+ MAC_IO_STATIC_PARAMS_CMDID = 0x81B,
+ MAC_IO_DYNAMIC_PARAMS_CMDID = 0x81C,
+ WMI_SILENT_RSSI_CALIB_CMDID = 0x81D,
+ WMI_RF_RX_TEST_CMDID = 0x81E,
+ WMI_CFG_RX_CHAIN_CMDID = 0x820,
+ WMI_VRING_CFG_CMDID = 0x821,
+ WMI_BCAST_VRING_CFG_CMDID = 0x822,
+ WMI_VRING_BA_EN_CMDID = 0x823,
+ WMI_VRING_BA_DIS_CMDID = 0x824,
+ WMI_RCP_ADDBA_RESP_CMDID = 0x825,
+ WMI_RCP_DELBA_CMDID = 0x826,
+ WMI_SET_SSID_CMDID = 0x827,
+ WMI_GET_SSID_CMDID = 0x828,
+ WMI_SET_PCP_CHANNEL_CMDID = 0x829,
+ WMI_GET_PCP_CHANNEL_CMDID = 0x82A,
+ WMI_SW_TX_REQ_CMDID = 0x82B,
+ WMI_READ_MAC_RXQ_CMDID = 0x830,
+ WMI_READ_MAC_TXQ_CMDID = 0x831,
+ WMI_WRITE_MAC_RXQ_CMDID = 0x832,
+ WMI_WRITE_MAC_TXQ_CMDID = 0x833,
+ WMI_WRITE_MAC_XQ_FIELD_CMDID = 0x834,
+ WMI_MLME_PUSH_CMDID = 0x835,
+ WMI_BEAMFORMING_MGMT_CMDID = 0x836,
+ WMI_BF_TXSS_MGMT_CMDID = 0x837,
+ WMI_BF_SM_MGMT_CMDID = 0x838,
+ WMI_BF_RXSS_MGMT_CMDID = 0x839,
+ WMI_BF_TRIG_CMDID = 0x83A,
+ WMI_SET_SECTORS_CMDID = 0x849,
+ WMI_MAINTAIN_PAUSE_CMDID = 0x850,
+ WMI_MAINTAIN_RESUME_CMDID = 0x851,
+ WMI_RS_MGMT_CMDID = 0x852,
+ WMI_RF_MGMT_CMDID = 0x853,
+ WMI_THERMAL_THROTTLING_CTRL_CMDID = 0x854,
+ WMI_THERMAL_THROTTLING_GET_STATUS_CMDID = 0x855,
+ WMI_OTP_READ_CMDID = 0x856,
+ WMI_OTP_WRITE_CMDID = 0x857,
+ WMI_LED_CFG_CMDID = 0x858,
/* Performance monitoring commands */
- WMI_BF_CTRL_CMDID = 0x0862,
- WMI_NOTIFY_REQ_CMDID = 0x0863,
- WMI_GET_STATUS_CMDID = 0x0864,
- WMI_UNIT_TEST_CMDID = 0x0900,
- WMI_HICCUP_CMDID = 0x0901,
- WMI_FLASH_READ_CMDID = 0x0902,
- WMI_FLASH_WRITE_CMDID = 0x0903,
- WMI_SECURITY_UNIT_TEST_CMDID = 0x0904,
- /*P2P*/
- WMI_P2P_CFG_CMDID = 0x0910,
- WMI_PORT_ALLOCATE_CMDID = 0x0911,
- WMI_PORT_DELETE_CMDID = 0x0912,
- WMI_POWER_MGMT_CFG_CMDID = 0x0913,
- WMI_START_LISTEN_CMDID = 0x0914,
- WMI_START_SEARCH_CMDID = 0x0915,
- WMI_DISCOVERY_START_CMDID = 0x0916,
- WMI_DISCOVERY_STOP_CMDID = 0x0917,
- WMI_PCP_START_CMDID = 0x0918,
- WMI_PCP_STOP_CMDID = 0x0919,
- WMI_GET_PCP_FACTOR_CMDID = 0x091b,
-
- WMI_SET_MAC_ADDRESS_CMDID = 0xf003,
- WMI_ABORT_SCAN_CMDID = 0xf007,
- WMI_SET_PMK_CMDID = 0xf028,
-
- WMI_SET_PROMISCUOUS_MODE_CMDID = 0xf041,
- WMI_GET_PMK_CMDID = 0xf048,
- WMI_SET_PASSPHRASE_CMDID = 0xf049,
- WMI_SEND_ASSOC_RES_CMDID = 0xf04a,
- WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xf04b,
- WMI_EAPOL_TX_CMDID = 0xf04c,
- WMI_MAC_ADDR_REQ_CMDID = 0xf04d,
- WMI_FW_VER_CMDID = 0xf04e,
- WMI_PMC_CMDID = 0xf04f,
+ WMI_BF_CTRL_CMDID = 0x862,
+ WMI_NOTIFY_REQ_CMDID = 0x863,
+ WMI_GET_STATUS_CMDID = 0x864,
+ WMI_UNIT_TEST_CMDID = 0x900,
+ WMI_HICCUP_CMDID = 0x901,
+ WMI_FLASH_READ_CMDID = 0x902,
+ WMI_FLASH_WRITE_CMDID = 0x903,
+ /* P2P */
+ WMI_P2P_CFG_CMDID = 0x910,
+ WMI_PORT_ALLOCATE_CMDID = 0x911,
+ WMI_PORT_DELETE_CMDID = 0x912,
+ WMI_POWER_MGMT_CFG_CMDID = 0x913,
+ WMI_START_LISTEN_CMDID = 0x914,
+ WMI_START_SEARCH_CMDID = 0x915,
+ WMI_DISCOVERY_START_CMDID = 0x916,
+ WMI_DISCOVERY_STOP_CMDID = 0x917,
+ WMI_PCP_START_CMDID = 0x918,
+ WMI_PCP_STOP_CMDID = 0x919,
+ WMI_GET_PCP_FACTOR_CMDID = 0x91B,
+ WMI_SET_MAC_ADDRESS_CMDID = 0xF003,
+ WMI_ABORT_SCAN_CMDID = 0xF007,
+ WMI_SET_PROMISCUOUS_MODE_CMDID = 0xF041,
+ WMI_GET_PMK_CMDID = 0xF048,
+ WMI_SET_PASSPHRASE_CMDID = 0xF049,
+ WMI_SEND_ASSOC_RES_CMDID = 0xF04A,
+ WMI_SET_ASSOC_REQ_RELAY_CMDID = 0xF04B,
+ WMI_MAC_ADDR_REQ_CMDID = 0xF04D,
+ WMI_FW_VER_CMDID = 0xF04E,
+ WMI_PMC_CMDID = 0xF04F,
};
-/*
- * Commands data structures
- */
-
-/*
- * WMI_CONNECT_CMDID
- */
+/* WMI_CONNECT_CMDID */
enum wmi_network_type {
WMI_NETTYPE_INFRA = 0x01,
WMI_NETTYPE_ADHOC = 0x02,
WMI_NETTYPE_ADHOC_CREATOR = 0x04,
WMI_NETTYPE_AP = 0x10,
WMI_NETTYPE_P2P = 0x20,
- WMI_NETTYPE_WBE = 0x40, /* PCIE over 60g */
+ /* PCIE over 60g */
+ WMI_NETTYPE_WBE = 0x40,
};
enum wmi_dot11_auth_mode {
- WMI_AUTH11_OPEN = 0x01,
- WMI_AUTH11_SHARED = 0x02,
- WMI_AUTH11_LEAP = 0x04,
- WMI_AUTH11_WSC = 0x08,
+ WMI_AUTH11_OPEN = 0x01,
+ WMI_AUTH11_SHARED = 0x02,
+ WMI_AUTH11_LEAP = 0x04,
+ WMI_AUTH11_WSC = 0x08,
};
enum wmi_auth_mode {
- WMI_AUTH_NONE = 0x01,
- WMI_AUTH_WPA = 0x02,
- WMI_AUTH_WPA2 = 0x04,
- WMI_AUTH_WPA_PSK = 0x08,
- WMI_AUTH_WPA2_PSK = 0x10,
- WMI_AUTH_WPA_CCKM = 0x20,
- WMI_AUTH_WPA2_CCKM = 0x40,
+ WMI_AUTH_NONE = 0x01,
+ WMI_AUTH_WPA = 0x02,
+ WMI_AUTH_WPA2 = 0x04,
+ WMI_AUTH_WPA_PSK = 0x08,
+ WMI_AUTH_WPA2_PSK = 0x10,
+ WMI_AUTH_WPA_CCKM = 0x20,
+ WMI_AUTH_WPA2_CCKM = 0x40,
};
enum wmi_crypto_type {
- WMI_CRYPT_NONE = 0x01,
- WMI_CRYPT_WEP = 0x02,
- WMI_CRYPT_TKIP = 0x04,
- WMI_CRYPT_AES = 0x08,
- WMI_CRYPT_AES_GCMP = 0x20,
+ WMI_CRYPT_NONE = 0x01,
+ WMI_CRYPT_AES_GCMP = 0x20,
};
enum wmi_connect_ctrl_flag_bits {
- WMI_CONNECT_ASSOC_POLICY_USER = 0x0001,
- WMI_CONNECT_SEND_REASSOC = 0x0002,
- WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x0004,
- WMI_CONNECT_PROFILE_MATCH_DONE = 0x0008,
- WMI_CONNECT_IGNORE_AAC_BEACON = 0x0010,
- WMI_CONNECT_CSA_FOLLOW_BSS = 0x0020,
- WMI_CONNECT_DO_WPA_OFFLOAD = 0x0040,
- WMI_CONNECT_DO_NOT_DEAUTH = 0x0080,
+ WMI_CONNECT_ASSOC_POLICY_USER = 0x01,
+ WMI_CONNECT_SEND_REASSOC = 0x02,
+ WMI_CONNECT_IGNORE_WPA_GROUP_CIPHER = 0x04,
+ WMI_CONNECT_PROFILE_MATCH_DONE = 0x08,
+ WMI_CONNECT_IGNORE_AAC_BEACON = 0x10,
+ WMI_CONNECT_CSA_FOLLOW_BSS = 0x20,
+ WMI_CONNECT_DO_WPA_OFFLOAD = 0x40,
+ WMI_CONNECT_DO_NOT_DEAUTH = 0x80,
};
-#define WMI_MAX_SSID_LEN (32)
+#define WMI_MAX_SSID_LEN (32)
+/* WMI_CONNECT_CMDID */
struct wmi_connect_cmd {
u8 network_type;
u8 dot11_auth_mode;
@@ -216,31 +227,17 @@ struct wmi_connect_cmd {
u8 reserved1[2];
} __packed;
-/*
- * WMI_DISCONNECT_STA_CMDID
- */
+/* WMI_DISCONNECT_STA_CMDID */
struct wmi_disconnect_sta_cmd {
u8 dst_mac[WMI_MAC_LEN];
__le16 disconnect_reason;
} __packed;
-/*
- * WMI_SET_PMK_CMDID
- */
-
-#define WMI_MIN_KEY_INDEX (0)
#define WMI_MAX_KEY_INDEX (3)
#define WMI_MAX_KEY_LEN (32)
#define WMI_PASSPHRASE_LEN (64)
-#define WMI_PMK_LEN (32)
-struct wmi_set_pmk_cmd {
- u8 pmk[WMI_PMK_LEN];
-} __packed;
-
-/*
- * WMI_SET_PASSPHRASE_CMDID
- */
+/* WMI_SET_PASSPHRASE_CMDID */
struct wmi_set_passphrase_cmd {
u8 ssid[WMI_MAX_SSID_LEN];
u8 passphrase[WMI_PASSPHRASE_LEN];
@@ -248,36 +245,34 @@ struct wmi_set_passphrase_cmd {
u8 passphrase_len;
} __packed;
-/*
- * WMI_ADD_CIPHER_KEY_CMDID
- */
+/* WMI_ADD_CIPHER_KEY_CMDID */
enum wmi_key_usage {
- WMI_KEY_USE_PAIRWISE = 0,
- WMI_KEY_USE_RX_GROUP = 1,
- WMI_KEY_USE_TX_GROUP = 2,
+ WMI_KEY_USE_PAIRWISE = 0x00,
+ WMI_KEY_USE_RX_GROUP = 0x01,
+ WMI_KEY_USE_TX_GROUP = 0x02,
};
struct wmi_add_cipher_key_cmd {
u8 key_index;
u8 key_type;
- u8 key_usage; /* enum wmi_key_usage */
+ /* enum wmi_key_usage */
+ u8 key_usage;
u8 key_len;
- u8 key_rsc[8]; /* key replay sequence counter */
+ /* key replay sequence counter */
+ u8 key_rsc[8];
u8 key[WMI_MAX_KEY_LEN];
- u8 key_op_ctrl; /* Additional Key Control information */
+ /* Additional Key Control information */
+ u8 key_op_ctrl;
u8 mac[WMI_MAC_LEN];
} __packed;
-/*
- * WMI_DELETE_CIPHER_KEY_CMDID
- */
+/* WMI_DELETE_CIPHER_KEY_CMDID */
struct wmi_delete_cipher_key_cmd {
u8 key_index;
u8 mac[WMI_MAC_LEN];
} __packed;
-/*
- * WMI_START_SCAN_CMDID
+/* WMI_START_SCAN_CMDID
*
* Start L1 scan operation
*
@@ -286,146 +281,142 @@ struct wmi_delete_cipher_key_cmd {
* - WMI_SCAN_COMPLETE_EVENTID
*/
enum wmi_scan_type {
- WMI_LONG_SCAN = 0,
- WMI_SHORT_SCAN = 1,
- WMI_PBC_SCAN = 2,
- WMI_DIRECT_SCAN = 3,
- WMI_ACTIVE_SCAN = 4,
+ WMI_ACTIVE_SCAN = 0x00,
+ WMI_SHORT_SCAN = 0x01,
+ WMI_PASSIVE_SCAN = 0x02,
+ WMI_DIRECT_SCAN = 0x03,
+ WMI_LONG_SCAN = 0x04,
};
+/* WMI_START_SCAN_CMDID */
struct wmi_start_scan_cmd {
- u8 direct_scan_mac_addr[6];
- u8 reserved[2];
- __le32 home_dwell_time; /* Max duration in the home channel(ms) */
- __le32 force_scan_interval; /* Time interval between scans (ms)*/
- u8 scan_type; /* wmi_scan_type */
- u8 num_channels; /* how many channels follow */
+ u8 direct_scan_mac_addr[WMI_MAC_LEN];
+ /* DMG Beacon frame is transmitted during active scanning */
+ u8 discovery_mode;
+ /* reserved */
+ u8 reserved;
+	/* Max duration in the home channel (ms) */
+ __le32 dwell_time;
+ /* Time interval between scans (ms) */
+ __le32 force_scan_interval;
+ /* enum wmi_scan_type */
+ u8 scan_type;
+ /* how many channels follow */
+ u8 num_channels;
+	/* channel IDs:
+ * 0 - 58320 MHz
+ * 1 - 60480 MHz
+ * 2 - 62640 MHz
+ */
struct {
u8 channel;
u8 reserved;
- } channel_list[0]; /* channels ID's */
- /* 0 - 58320 MHz */
- /* 1 - 60480 MHz */
- /* 2 - 62640 MHz */
+ } channel_list[0];
} __packed;
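
For reference, struct wmi_start_scan_cmd ends in a zero-length channel_list[] array, so the command buffer must be sized for the number of channels that follow. A minimal sketch of building a one-channel scan command, assuming the usual kernel headers; send_wmi() is a hypothetical stand-in for the driver's WMI transport, not defined in this header:

/* Sketch only: size the buffer for one trailing channel entry.
 * send_wmi() is a stand-in for the driver's WMI send path.
 */
static int example_start_scan(void)
{
	struct wmi_start_scan_cmd *cmd;
	size_t len = sizeof(*cmd) + 1 * sizeof(cmd->channel_list[0]);
	int rc;

	cmd = kzalloc(len, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->scan_type = WMI_ACTIVE_SCAN;
	cmd->dwell_time = cpu_to_le32(WMI_SCAN_DWELL_TIME_MS);
	cmd->num_channels = 1;
	cmd->channel_list[0].channel = 0;	/* 58320 MHz */

	rc = send_wmi(WMI_START_SCAN_CMDID, cmd, len);
	kfree(cmd);
	return rc;
}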
-/*
- * WMI_SET_PROBED_SSID_CMDID
- */
+/* WMI_SET_PROBED_SSID_CMDID */
#define MAX_PROBED_SSID_INDEX (3)
enum wmi_ssid_flag {
- WMI_SSID_FLAG_DISABLE = 0, /* disables entry */
- WMI_SSID_FLAG_SPECIFIC = 1, /* probes specified ssid */
- WMI_SSID_FLAG_ANY = 2, /* probes for any ssid */
+ /* disables entry */
+ WMI_SSID_FLAG_DISABLE = 0x00,
+ /* probes specified ssid */
+ WMI_SSID_FLAG_SPECIFIC = 0x01,
+ /* probes for any ssid */
+ WMI_SSID_FLAG_ANY = 0x02,
};
struct wmi_probed_ssid_cmd {
- u8 entry_index; /* 0 to MAX_PROBED_SSID_INDEX */
- u8 flag; /* enum wmi_ssid_flag */
+ /* 0 to MAX_PROBED_SSID_INDEX */
+ u8 entry_index;
+ /* enum wmi_ssid_flag */
+ u8 flag;
u8 ssid_len;
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
-/*
- * WMI_SET_APPIE_CMDID
+/* WMI_SET_APPIE_CMDID
* Add Application specified IE to a management frame
*/
-#define WMI_MAX_IE_LEN (1024)
+#define WMI_MAX_IE_LEN (1024)
-/*
- * Frame Types
- */
+/* Frame Types */
enum wmi_mgmt_frame_type {
- WMI_FRAME_BEACON = 0,
- WMI_FRAME_PROBE_REQ = 1,
- WMI_FRAME_PROBE_RESP = 2,
- WMI_FRAME_ASSOC_REQ = 3,
- WMI_FRAME_ASSOC_RESP = 4,
- WMI_NUM_MGMT_FRAME,
+ WMI_FRAME_BEACON = 0x00,
+ WMI_FRAME_PROBE_REQ = 0x01,
+ WMI_FRAME_PROBE_RESP = 0x02,
+ WMI_FRAME_ASSOC_REQ = 0x03,
+ WMI_FRAME_ASSOC_RESP = 0x04,
+ WMI_NUM_MGMT_FRAME = 0x05,
};
struct wmi_set_appie_cmd {
- u8 mgmt_frm_type; /* enum wmi_mgmt_frame_type */
+ /* enum wmi_mgmt_frame_type */
+ u8 mgmt_frm_type;
u8 reserved;
- __le16 ie_len; /* Length of the IE to be added to MGMT frame */
+ /* Length of the IE to be added to MGMT frame */
+ __le16 ie_len;
u8 ie_info[0];
} __packed;
-/*
- * WMI_PXMT_RANGE_CFG_CMDID
- */
+/* WMI_PXMT_RANGE_CFG_CMDID */
struct wmi_pxmt_range_cfg_cmd {
u8 dst_mac[WMI_MAC_LEN];
__le16 range;
} __packed;
-/*
- * WMI_PXMT_SNR2_RANGE_CFG_CMDID
- */
+/* WMI_PXMT_SNR2_RANGE_CFG_CMDID */
struct wmi_pxmt_snr2_range_cfg_cmd {
- s8 snr2range_arr[WMI_PROX_RANGE_NUM-1];
+ s8 snr2range_arr[2];
} __packed;
-/*
- * WMI_RF_MGMT_CMDID
- */
+/* WMI_RF_MGMT_CMDID */
enum wmi_rf_mgmt_type {
- WMI_RF_MGMT_W_DISABLE = 0,
- WMI_RF_MGMT_W_ENABLE = 1,
- WMI_RF_MGMT_GET_STATUS = 2,
+ WMI_RF_MGMT_W_DISABLE = 0x00,
+ WMI_RF_MGMT_W_ENABLE = 0x01,
+ WMI_RF_MGMT_GET_STATUS = 0x02,
};
+/* WMI_RF_MGMT_CMDID */
struct wmi_rf_mgmt_cmd {
__le32 rf_mgmt_type;
} __packed;
-/*
- * WMI_THERMAL_THROTTLING_CTRL_CMDID
- */
+/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
#define THERMAL_THROTTLING_USE_DEFAULT_MAX_TXOP_LENGTH (0xFFFFFFFF)
+/* WMI_THERMAL_THROTTLING_CTRL_CMDID */
struct wmi_thermal_throttling_ctrl_cmd {
__le32 time_on_usec;
__le32 time_off_usec;
__le32 max_txop_length_usec;
} __packed;
-/*
- * WMI_RF_RX_TEST_CMDID
- */
+/* WMI_RF_RX_TEST_CMDID */
struct wmi_rf_rx_test_cmd {
__le32 sector;
} __packed;
-/*
- * WMI_CORR_MEASURE_CMDID
- */
+/* WMI_CORR_MEASURE_CMDID */
struct wmi_corr_measure_cmd {
- s32 freq_mhz;
+ __le32 freq_mhz;
__le32 length_samples;
__le32 iterations;
} __packed;
-/*
- * WMI_SET_SSID_CMDID
- */
+/* WMI_SET_SSID_CMDID */
struct wmi_set_ssid_cmd {
__le32 ssid_len;
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
-/*
- * WMI_SET_PCP_CHANNEL_CMDID
- */
+/* WMI_SET_PCP_CHANNEL_CMDID */
struct wmi_set_pcp_channel_cmd {
u8 channel;
u8 reserved[3];
} __packed;
-/*
- * WMI_BCON_CTRL_CMDID
- */
+/* WMI_BCON_CTRL_CMDID */
struct wmi_bcon_ctrl_cmd {
__le16 bcon_interval;
__le16 frag_num;
@@ -434,214 +425,192 @@ struct wmi_bcon_ctrl_cmd {
u8 pcp_max_assoc_sta;
u8 disable_sec_offload;
u8 disable_sec;
+ u8 hidden_ssid;
+ u8 is_go;
+ u8 reserved[2];
} __packed;
-/******* P2P ***********/
-
-/*
- * WMI_PORT_ALLOCATE_CMDID
- */
+/* WMI_PORT_ALLOCATE_CMDID */
enum wmi_port_role {
- WMI_PORT_STA = 0,
- WMI_PORT_PCP = 1,
- WMI_PORT_AP = 2,
- WMI_PORT_P2P_DEV = 3,
- WMI_PORT_P2P_CLIENT = 4,
- WMI_PORT_P2P_GO = 5,
+ WMI_PORT_STA = 0x00,
+ WMI_PORT_PCP = 0x01,
+ WMI_PORT_AP = 0x02,
+ WMI_PORT_P2P_DEV = 0x03,
+ WMI_PORT_P2P_CLIENT = 0x04,
+ WMI_PORT_P2P_GO = 0x05,
};
+/* WMI_PORT_ALLOCATE_CMDID */
struct wmi_port_allocate_cmd {
u8 mac[WMI_MAC_LEN];
u8 port_role;
u8 mid;
} __packed;
-/*
- * WMI_PORT_DELETE_CMDID
- */
-struct wmi_delete_port_cmd {
+/* WMI_PORT_DELETE_CMDID */
+struct wmi_port_delete_cmd {
u8 mid;
u8 reserved[3];
} __packed;
-/*
- * WMI_P2P_CFG_CMDID
- */
+/* WMI_P2P_CFG_CMDID */
enum wmi_discovery_mode {
- WMI_DISCOVERY_MODE_NON_OFFLOAD = 0,
- WMI_DISCOVERY_MODE_OFFLOAD = 1,
- WMI_DISCOVERY_MODE_PEER2PEER = 2,
+ WMI_DISCOVERY_MODE_NON_OFFLOAD = 0x00,
+ WMI_DISCOVERY_MODE_OFFLOAD = 0x01,
+ WMI_DISCOVERY_MODE_PEER2PEER = 0x02,
};
struct wmi_p2p_cfg_cmd {
- u8 discovery_mode; /* wmi_discovery_mode */
+ /* enum wmi_discovery_mode */
+ u8 discovery_mode;
u8 channel;
- __le16 bcon_interval; /* base to listen/search duration calculation */
+	/* base for listen/search duration calculation */
+ __le16 bcon_interval;
} __packed;
-/*
- * WMI_POWER_MGMT_CFG_CMDID
- */
+/* WMI_POWER_MGMT_CFG_CMDID */
enum wmi_power_source_type {
- WMI_POWER_SOURCE_BATTERY = 0,
- WMI_POWER_SOURCE_OTHER = 1,
+ WMI_POWER_SOURCE_BATTERY = 0x00,
+ WMI_POWER_SOURCE_OTHER = 0x01,
};
struct wmi_power_mgmt_cfg_cmd {
- u8 power_source; /* wmi_power_source_type */
+ /* enum wmi_power_source_type */
+ u8 power_source;
u8 reserved[3];
} __packed;
-/*
- * WMI_PCP_START_CMDID
- */
-
-enum wmi_hidden_ssid {
- WMI_HIDDEN_SSID_DISABLED = 0,
- WMI_HIDDEN_SSID_SEND_EMPTY = 1,
- WMI_HIDDEN_SSID_CLEAR = 2,
-};
-
+/* WMI_PCP_START_CMDID */
struct wmi_pcp_start_cmd {
__le16 bcon_interval;
u8 pcp_max_assoc_sta;
u8 hidden_ssid;
- u8 reserved0[8];
+ u8 is_go;
+ u8 reserved0[7];
u8 network_type;
u8 channel;
u8 disable_sec_offload;
u8 disable_sec;
} __packed;
-/*
- * WMI_SW_TX_REQ_CMDID
- */
+/* WMI_SW_TX_REQ_CMDID */
struct wmi_sw_tx_req_cmd {
u8 dst_mac[WMI_MAC_LEN];
__le16 len;
u8 payload[0];
} __packed;
-/*
- * WMI_VRING_CFG_CMDID
- */
-
struct wmi_sw_ring_cfg {
__le64 ring_mem_base;
__le16 ring_size;
__le16 max_mpdu_size;
} __packed;
+/* wmi_vring_cfg_schd */
struct wmi_vring_cfg_schd {
__le16 priority;
__le16 timeslot_us;
} __packed;
enum wmi_vring_cfg_encap_trans_type {
- WMI_VRING_ENC_TYPE_802_3 = 0,
- WMI_VRING_ENC_TYPE_NATIVE_WIFI = 1,
+ WMI_VRING_ENC_TYPE_802_3 = 0x00,
+ WMI_VRING_ENC_TYPE_NATIVE_WIFI = 0x01,
};
enum wmi_vring_cfg_ds_cfg {
- WMI_VRING_DS_PBSS = 0,
- WMI_VRING_DS_STATION = 1,
- WMI_VRING_DS_AP = 2,
- WMI_VRING_DS_ADDR4 = 3,
+ WMI_VRING_DS_PBSS = 0x00,
+ WMI_VRING_DS_STATION = 0x01,
+ WMI_VRING_DS_AP = 0x02,
+ WMI_VRING_DS_ADDR4 = 0x03,
};
enum wmi_vring_cfg_nwifi_ds_trans_type {
- WMI_NWIFI_TX_TRANS_MODE_NO = 0,
- WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 1,
- WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 2,
+ WMI_NWIFI_TX_TRANS_MODE_NO = 0x00,
+ WMI_NWIFI_TX_TRANS_MODE_AP2PBSS = 0x01,
+ WMI_NWIFI_TX_TRANS_MODE_STA2PBSS = 0x02,
};
enum wmi_vring_cfg_schd_params_priority {
- WMI_SCH_PRIO_REGULAR = 0,
- WMI_SCH_PRIO_HIGH = 1,
+ WMI_SCH_PRIO_REGULAR = 0x00,
+ WMI_SCH_PRIO_HIGH = 0x01,
};
-#define CIDXTID_CID_POS (0)
-#define CIDXTID_CID_LEN (4)
-#define CIDXTID_CID_MSK (0xF)
-#define CIDXTID_TID_POS (4)
-#define CIDXTID_TID_LEN (4)
-#define CIDXTID_TID_MSK (0xF0)
+#define CIDXTID_CID_POS (0)
+#define CIDXTID_CID_LEN (4)
+#define CIDXTID_CID_MSK (0xF)
+#define CIDXTID_TID_POS (4)
+#define CIDXTID_TID_LEN (4)
+#define CIDXTID_TID_MSK (0xF0)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
+#define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
+#define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
+#define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
+#define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
+#define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
struct wmi_vring_cfg {
struct wmi_sw_ring_cfg tx_sw_ring;
- u8 ringid; /* 0-23 vrings */
-
+ /* 0-23 vrings */
+ u8 ringid;
u8 cidxtid;
-
u8 encap_trans_type;
- u8 ds_cfg; /* 802.3 DS cfg */
+ /* 802.3 DS cfg */
+ u8 ds_cfg;
u8 nwifi_ds_trans_type;
-
- #define VRING_CFG_MAC_CTRL_LIFETIME_EN_POS (0)
- #define VRING_CFG_MAC_CTRL_LIFETIME_EN_LEN (1)
- #define VRING_CFG_MAC_CTRL_LIFETIME_EN_MSK (0x1)
- #define VRING_CFG_MAC_CTRL_AGGR_EN_POS (1)
- #define VRING_CFG_MAC_CTRL_AGGR_EN_LEN (1)
- #define VRING_CFG_MAC_CTRL_AGGR_EN_MSK (0x2)
u8 mac_ctrl;
-
- #define VRING_CFG_TO_RESOLUTION_VALUE_POS (0)
- #define VRING_CFG_TO_RESOLUTION_VALUE_LEN (6)
- #define VRING_CFG_TO_RESOLUTION_VALUE_MSK (0x3F)
u8 to_resolution;
u8 agg_max_wsize;
struct wmi_vring_cfg_schd schd_params;
} __packed;
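
The cidxtid byte above packs a connection ID (bits 0-3) and a TID (bits 4-7) per the CIDXTID_* position/mask macros. A minimal packing sketch (illustrative only; the helper name is not part of this header):

/* Sketch: combine CID and TID into the cidxtid byte using the
 * CIDXTID_* position/mask macros defined above.
 */
static inline u8 example_mk_cidxtid(u8 cid, u8 tid)
{
	return ((cid << CIDXTID_CID_POS) & CIDXTID_CID_MSK) |
	       ((tid << CIDXTID_TID_POS) & CIDXTID_TID_MSK);
}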
enum wmi_vring_cfg_cmd_action {
- WMI_VRING_CMD_ADD = 0,
- WMI_VRING_CMD_MODIFY = 1,
- WMI_VRING_CMD_DELETE = 2,
+ WMI_VRING_CMD_ADD = 0x00,
+ WMI_VRING_CMD_MODIFY = 0x01,
+ WMI_VRING_CMD_DELETE = 0x02,
};
+/* WMI_VRING_CFG_CMDID */
struct wmi_vring_cfg_cmd {
__le32 action;
struct wmi_vring_cfg vring_cfg;
} __packed;
-/*
- * WMI_BCAST_VRING_CFG_CMDID
- */
struct wmi_bcast_vring_cfg {
struct wmi_sw_ring_cfg tx_sw_ring;
- u8 ringid; /* 0-23 vrings */
+ /* 0-23 vrings */
+ u8 ringid;
u8 encap_trans_type;
- u8 ds_cfg; /* 802.3 DS cfg */
+ /* 802.3 DS cfg */
+ u8 ds_cfg;
u8 nwifi_ds_trans_type;
} __packed;
+/* WMI_BCAST_VRING_CFG_CMDID */
struct wmi_bcast_vring_cfg_cmd {
__le32 action;
struct wmi_bcast_vring_cfg vring_cfg;
} __packed;
-/*
- * WMI_VRING_BA_EN_CMDID
- */
+/* WMI_VRING_BA_EN_CMDID */
struct wmi_vring_ba_en_cmd {
u8 ringid;
u8 agg_max_wsize;
__le16 ba_timeout;
u8 amsdu;
+ u8 reserved[3];
} __packed;
-/*
- * WMI_VRING_BA_DIS_CMDID
- */
+/* WMI_VRING_BA_DIS_CMDID */
struct wmi_vring_ba_dis_cmd {
u8 ringid;
u8 reserved;
__le16 reason;
} __packed;
-/*
- * WMI_NOTIFY_REQ_CMDID
- */
+/* WMI_NOTIFY_REQ_CMDID */
struct wmi_notify_req_cmd {
u8 cid;
u8 year;
@@ -654,102 +623,100 @@ struct wmi_notify_req_cmd {
u8 miliseconds;
} __packed;
-/*
- * WMI_CFG_RX_CHAIN_CMDID
- */
+/* WMI_CFG_RX_CHAIN_CMDID */
enum wmi_sniffer_cfg_mode {
- WMI_SNIFFER_OFF = 0,
- WMI_SNIFFER_ON = 1,
+ WMI_SNIFFER_OFF = 0x00,
+ WMI_SNIFFER_ON = 0x01,
};
enum wmi_sniffer_cfg_phy_info_mode {
- WMI_SNIFFER_PHY_INFO_DISABLED = 0,
- WMI_SNIFFER_PHY_INFO_ENABLED = 1,
+ WMI_SNIFFER_PHY_INFO_DISABLED = 0x00,
+ WMI_SNIFFER_PHY_INFO_ENABLED = 0x01,
};
enum wmi_sniffer_cfg_phy_support {
- WMI_SNIFFER_CP = 0,
- WMI_SNIFFER_DP = 1,
- WMI_SNIFFER_BOTH_PHYS = 2,
+ WMI_SNIFFER_CP = 0x00,
+ WMI_SNIFFER_DP = 0x01,
+ WMI_SNIFFER_BOTH_PHYS = 0x02,
};
+/* wmi_sniffer_cfg */
struct wmi_sniffer_cfg {
- __le32 mode; /* enum wmi_sniffer_cfg_mode */
- __le32 phy_info_mode; /* enum wmi_sniffer_cfg_phy_info_mode */
- __le32 phy_support; /* enum wmi_sniffer_cfg_phy_support */
+ /* enum wmi_sniffer_cfg_mode */
+ __le32 mode;
+ /* enum wmi_sniffer_cfg_phy_info_mode */
+ __le32 phy_info_mode;
+ /* enum wmi_sniffer_cfg_phy_support */
+ __le32 phy_support;
u8 channel;
u8 reserved[3];
} __packed;
enum wmi_cfg_rx_chain_cmd_action {
- WMI_RX_CHAIN_ADD = 0,
- WMI_RX_CHAIN_DEL = 1,
+ WMI_RX_CHAIN_ADD = 0x00,
+ WMI_RX_CHAIN_DEL = 0x01,
};
enum wmi_cfg_rx_chain_cmd_decap_trans_type {
- WMI_DECAP_TYPE_802_3 = 0,
- WMI_DECAP_TYPE_NATIVE_WIFI = 1,
- WMI_DECAP_TYPE_NONE = 2,
+ WMI_DECAP_TYPE_802_3 = 0x00,
+ WMI_DECAP_TYPE_NATIVE_WIFI = 0x01,
+ WMI_DECAP_TYPE_NONE = 0x02,
};
enum wmi_cfg_rx_chain_cmd_nwifi_ds_trans_type {
- WMI_NWIFI_RX_TRANS_MODE_NO = 0,
- WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 1,
- WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 2,
+ WMI_NWIFI_RX_TRANS_MODE_NO = 0x00,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2AP = 0x01,
+ WMI_NWIFI_RX_TRANS_MODE_PBSS2STA = 0x02,
};
enum wmi_cfg_rx_chain_cmd_reorder_type {
- WMI_RX_HW_REORDER = 0,
- WMI_RX_SW_REORDER = 1,
+ WMI_RX_HW_REORDER = 0x00,
+ WMI_RX_SW_REORDER = 0x01,
};
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
+#define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
+#define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
+#define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
+#define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
+#define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
+#define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
+
+/* WMI_CFG_RX_CHAIN_CMDID */
struct wmi_cfg_rx_chain_cmd {
__le32 action;
struct wmi_sw_ring_cfg rx_sw_ring;
u8 mid;
u8 decap_trans_type;
-
- #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_POS (0)
- #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_LEN (1)
- #define L2_802_3_OFFLOAD_CTRL_VLAN_TAG_INSERTION_MSK (0x1)
- #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_POS (1)
- #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_LEN (1)
- #define L2_802_3_OFFLOAD_CTRL_SNAP_KEEP_MSK (0x2)
u8 l2_802_3_offload_ctrl;
-
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_POS (0)
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_LEN (1)
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_QOS_MSK (0x1)
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_POS (1)
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_LEN (1)
- #define L2_NWIFI_OFFLOAD_CTRL_REMOVE_PN_MSK (0x2)
u8 l2_nwifi_offload_ctrl;
-
u8 vlan_id;
u8 nwifi_ds_trans_type;
-
- #define L3_L4_CTRL_IPV4_CHECKSUM_EN_POS (0)
- #define L3_L4_CTRL_IPV4_CHECKSUM_EN_LEN (1)
- #define L3_L4_CTRL_IPV4_CHECKSUM_EN_MSK (0x1)
- #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS (1)
- #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_LEN (1)
- #define L3_L4_CTRL_TCPIP_CHECKSUM_EN_MSK (0x2)
u8 l3_l4_ctrl;
-
- #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_POS (0)
- #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_LEN (1)
- #define RING_CTRL_OVERRIDE_PREFETCH_THRSH_MSK (0x1)
- #define RING_CTRL_OVERRIDE_WB_THRSH_POS (1)
- #define RING_CTRL_OVERRIDE_WB_THRSH_LEN (1)
- #define RING_CTRL_OVERRIDE_WB_THRSH_MSK (0x2)
- #define RING_CTRL_OVERRIDE_ITR_THRSH_POS (2)
- #define RING_CTRL_OVERRIDE_ITR_THRSH_LEN (1)
- #define RING_CTRL_OVERRIDE_ITR_THRSH_MSK (0x4)
- #define RING_CTRL_OVERRIDE_HOST_THRSH_POS (3)
- #define RING_CTRL_OVERRIDE_HOST_THRSH_LEN (1)
- #define RING_CTRL_OVERRIDE_HOST_THRSH_MSK (0x8)
u8 ring_ctrl;
-
__le16 prefetch_thrsh;
__le16 wb_thrsh;
__le32 itr_value;
@@ -757,31 +724,27 @@ struct wmi_cfg_rx_chain_cmd {
u8 reorder_type;
u8 reserved;
struct wmi_sniffer_cfg sniffer_cfg;
+ __le16 max_rx_pl_per_desc;
} __packed;
-/*
- * WMI_RCP_ADDBA_RESP_CMDID
- */
+/* WMI_RCP_ADDBA_RESP_CMDID */
struct wmi_rcp_addba_resp_cmd {
u8 cidxtid;
u8 dialog_token;
__le16 status_code;
- __le16 ba_param_set; /* ieee80211_ba_parameterset field to send */
+ /* ieee80211_ba_parameterset field to send */
+ __le16 ba_param_set;
__le16 ba_timeout;
} __packed;
-/*
- * WMI_RCP_DELBA_CMDID
- */
+/* WMI_RCP_DELBA_CMDID */
struct wmi_rcp_delba_cmd {
u8 cidxtid;
u8 reserved;
__le16 reason;
} __packed;
-/*
- * WMI_RCP_ADDBA_REQ_CMDID
- */
+/* WMI_RCP_ADDBA_REQ_CMDID */
struct wmi_rcp_addba_req_cmd {
u8 cidxtid;
u8 dialog_token;
@@ -792,32 +755,16 @@ struct wmi_rcp_addba_req_cmd {
__le16 ba_seq_ctrl;
} __packed;
-/*
- * WMI_SET_MAC_ADDRESS_CMDID
- */
+/* WMI_SET_MAC_ADDRESS_CMDID */
struct wmi_set_mac_address_cmd {
u8 mac[WMI_MAC_LEN];
u8 reserved[2];
} __packed;
-/*
-* WMI_EAPOL_TX_CMDID
-*/
-struct wmi_eapol_tx_cmd {
- u8 dst_mac[WMI_MAC_LEN];
- __le16 eapol_len;
- u8 eapol[0];
-} __packed;
-
-/*
- * WMI_ECHO_CMDID
- *
+/* WMI_ECHO_CMDID
* Check FW is alive
- *
* WMI_DEEP_ECHO_CMDID
- *
* Check FW and ucode are alive
- *
* Returned event: WMI_ECHO_RSP_EVENTID
* same event for both commands
*/
@@ -825,70 +772,79 @@ struct wmi_echo_cmd {
__le32 value;
} __packed;
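
WMI_ECHO_CMDID and WMI_DEEP_ECHO_CMDID both answer with WMI_ECHO_RSP_EVENTID, so a firmware liveness check is a simple send-and-wait. A sketch assuming the driver exposes a synchronous wmi_call() helper; its exact signature and the 50 ms timeout are assumptions about the surrounding driver, not defined here:

/* Sketch: echo an arbitrary value and wait for WMI_ECHO_RSP_EVENTID.
 * wmi_call() and the timeout value are assumptions about the driver.
 */
static int example_fw_alive(struct wil6210_priv *wil)
{
	struct wmi_echo_cmd cmd = {
		.value = cpu_to_le32(0x12345678),
	};

	return wmi_call(wil, WMI_ECHO_CMDID, &cmd, sizeof(cmd),
			WMI_ECHO_RSP_EVENTID, NULL, 0, 50);
}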
-/*
- * WMI_TEMP_SENSE_CMDID
+/* WMI_OTP_READ_CMDID */
+struct wmi_otp_read_cmd {
+ __le32 addr;
+ __le32 size;
+ __le32 values;
+} __packed;
+
+/* WMI_OTP_WRITE_CMDID */
+struct wmi_otp_write_cmd {
+ __le32 addr;
+ __le32 size;
+ __le32 values;
+} __packed;
+
+/* WMI_TEMP_SENSE_CMDID
*
* Measure MAC and radio temperatures
+ *
+ * Possible modes for temperature measurement
*/
-
-/* Possible modes for temperature measurement */
enum wmi_temperature_measure_mode {
- TEMPERATURE_USE_OLD_VALUE = 0x1,
- TEMPERATURE_MEASURE_NOW = 0x2,
+ TEMPERATURE_USE_OLD_VALUE = 0x01,
+ TEMPERATURE_MEASURE_NOW = 0x02,
};
+/* WMI_TEMP_SENSE_CMDID */
struct wmi_temp_sense_cmd {
__le32 measure_baseband_en;
__le32 measure_rf_en;
__le32 measure_mode;
} __packed;
-/*
- * WMI_PMC_CMDID
- */
-enum wmi_pmc_op_e {
- WMI_PMC_ALLOCATE = 0,
- WMI_PMC_RELEASE = 1,
+enum wmi_pmc_op {
+ WMI_PMC_ALLOCATE = 0x00,
+ WMI_PMC_RELEASE = 0x01,
};
+/* WMI_PMC_CMDID */
struct wmi_pmc_cmd {
- u8 op; /* enum wmi_pmc_cmd_op_type */
+	/* enum wmi_pmc_op */
+ u8 op;
u8 reserved;
__le16 ring_size;
__le64 mem_base;
} __packed;
-/*
- * WMI Events
- */
-
-/*
+/* WMI Events
* List of Events (target to host)
*/
enum wmi_event_id {
WMI_READY_EVENTID = 0x1001,
WMI_CONNECT_EVENTID = 0x1002,
WMI_DISCONNECT_EVENTID = 0x1003,
- WMI_SCAN_COMPLETE_EVENTID = 0x100a,
- WMI_REPORT_STATISTICS_EVENTID = 0x100b,
+ WMI_SCAN_COMPLETE_EVENTID = 0x100A,
+ WMI_REPORT_STATISTICS_EVENTID = 0x100B,
WMI_RD_MEM_RSP_EVENTID = 0x1800,
WMI_FW_READY_EVENTID = 0x1801,
- WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x0200,
+ WMI_EXIT_FAST_MEM_ACC_MODE_EVENTID = 0x200,
WMI_ECHO_RSP_EVENTID = 0x1803,
- WMI_FS_TUNE_DONE_EVENTID = 0x180a,
- WMI_CORR_MEASURE_EVENTID = 0x180b,
- WMI_READ_RSSI_EVENTID = 0x180c,
- WMI_TEMP_SENSE_DONE_EVENTID = 0x180e,
- WMI_DC_CALIB_DONE_EVENTID = 0x180f,
+ WMI_FS_TUNE_DONE_EVENTID = 0x180A,
+ WMI_CORR_MEASURE_EVENTID = 0x180B,
+ WMI_READ_RSSI_EVENTID = 0x180C,
+ WMI_TEMP_SENSE_DONE_EVENTID = 0x180E,
+ WMI_DC_CALIB_DONE_EVENTID = 0x180F,
WMI_IQ_TX_CALIB_DONE_EVENTID = 0x1811,
WMI_IQ_RX_CALIB_DONE_EVENTID = 0x1812,
WMI_SET_WORK_MODE_DONE_EVENTID = 0x1815,
WMI_LO_LEAKAGE_CALIB_DONE_EVENTID = 0x1816,
WMI_MARLON_R_READ_DONE_EVENTID = 0x1818,
WMI_MARLON_R_WRITE_DONE_EVENTID = 0x1819,
- WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181a,
- WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181d,
- WMI_RF_RX_TEST_DONE_EVENTID = 0x181e,
+ WMI_MARLON_R_TXRX_SEL_DONE_EVENTID = 0x181A,
+ WMI_SILENT_RSSI_CALIB_DONE_EVENTID = 0x181D,
+ WMI_RF_RX_TEST_DONE_EVENTID = 0x181E,
WMI_CFG_RX_CHAIN_DONE_EVENTID = 0x1820,
WMI_VRING_CFG_DONE_EVENTID = 0x1821,
WMI_BA_STATUS_EVENTID = 0x1823,
@@ -896,15 +852,13 @@ enum wmi_event_id {
WMI_RCP_ADDBA_RESP_SENT_EVENTID = 0x1825,
WMI_DELBA_EVENTID = 0x1826,
WMI_GET_SSID_EVENTID = 0x1828,
- WMI_GET_PCP_CHANNEL_EVENTID = 0x182a,
- WMI_SW_TX_COMPLETE_EVENTID = 0x182b,
-
+ WMI_GET_PCP_CHANNEL_EVENTID = 0x182A,
+ WMI_SW_TX_COMPLETE_EVENTID = 0x182B,
WMI_READ_MAC_RXQ_EVENTID = 0x1830,
WMI_READ_MAC_TXQ_EVENTID = 0x1831,
WMI_WRITE_MAC_RXQ_EVENTID = 0x1832,
WMI_WRITE_MAC_TXQ_EVENTID = 0x1833,
WMI_WRITE_MAC_XQ_FIELD_EVENTID = 0x1834,
-
WMI_BEAMFORMING_MGMT_DONE_EVENTID = 0x1836,
WMI_BF_TXSS_MGMT_DONE_EVENTID = 0x1837,
WMI_BF_RXSS_MGMT_DONE_EVENTID = 0x1839,
@@ -914,20 +868,19 @@ enum wmi_event_id {
WMI_BF_SM_MGMT_DONE_EVENTID = 0x1838,
WMI_RX_MGMT_PACKET_EVENTID = 0x1840,
WMI_TX_MGMT_PACKET_EVENTID = 0x1841,
-
+ WMI_OTP_READ_RESULT_EVENTID = 0x1856,
+ WMI_LED_CFG_DONE_EVENTID = 0x1858,
/* Performance monitoring events */
WMI_DATA_PORT_OPEN_EVENTID = 0x1860,
WMI_WBE_LINK_DOWN_EVENTID = 0x1861,
-
WMI_BF_CTRL_DONE_EVENTID = 0x1862,
WMI_NOTIFY_REQ_DONE_EVENTID = 0x1863,
WMI_GET_STATUS_DONE_EVENTID = 0x1864,
WMI_VRING_EN_EVENTID = 0x1865,
-
WMI_UNIT_TEST_EVENTID = 0x1900,
WMI_FLASH_READ_DONE_EVENTID = 0x1902,
WMI_FLASH_WRITE_DONE_EVENTID = 0x1903,
- /*P2P*/
+ /* P2P */
WMI_P2P_CFG_DONE_EVENTID = 0x1910,
WMI_PORT_ALLOCATED_EVENTID = 0x1911,
WMI_PORT_DELETED_EVENTID = 0x1912,
@@ -937,49 +890,42 @@ enum wmi_event_id {
WMI_DISCOVERY_STOPPED_EVENTID = 0x1917,
WMI_PCP_STARTED_EVENTID = 0x1918,
WMI_PCP_STOPPED_EVENTID = 0x1919,
- WMI_PCP_FACTOR_EVENTID = 0x191a,
+ WMI_PCP_FACTOR_EVENTID = 0x191A,
WMI_SET_CHANNEL_EVENTID = 0x9000,
WMI_ASSOC_REQ_EVENTID = 0x9001,
WMI_EAPOL_RX_EVENTID = 0x9002,
WMI_MAC_ADDR_RESP_EVENTID = 0x9003,
WMI_FW_VER_EVENTID = 0x9004,
+ WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID = 0x9005,
};
-/*
- * Events data structures
- */
-
+/* Events data structures */
enum wmi_fw_status {
- WMI_FW_STATUS_SUCCESS,
- WMI_FW_STATUS_FAILURE,
+ WMI_FW_STATUS_SUCCESS = 0x00,
+ WMI_FW_STATUS_FAILURE = 0x01,
};
-/*
- * WMI_RF_MGMT_STATUS_EVENTID
- */
+/* WMI_RF_MGMT_STATUS_EVENTID */
enum wmi_rf_status {
- WMI_RF_ENABLED = 0,
- WMI_RF_DISABLED_HW = 1,
- WMI_RF_DISABLED_SW = 2,
- WMI_RF_DISABLED_HW_SW = 3,
+ WMI_RF_ENABLED = 0x00,
+ WMI_RF_DISABLED_HW = 0x01,
+ WMI_RF_DISABLED_SW = 0x02,
+ WMI_RF_DISABLED_HW_SW = 0x03,
};
+/* WMI_RF_MGMT_STATUS_EVENTID */
struct wmi_rf_mgmt_status_event {
__le32 rf_status;
} __packed;
-/*
- * WMI_THERMAL_THROTTLING_STATUS_EVENTID
- */
+/* WMI_THERMAL_THROTTLING_STATUS_EVENTID */
struct wmi_thermal_throttling_status_event {
__le32 time_on_usec;
__le32 time_off_usec;
__le32 max_txop_length_usec;
} __packed;
-/*
- * WMI_GET_STATUS_DONE_EVENTID
- */
+/* WMI_GET_STATUS_DONE_EVENTID */
struct wmi_get_status_done_event {
__le32 is_associated;
u8 cid;
@@ -995,9 +941,7 @@ struct wmi_get_status_done_event {
__le32 is_secured;
} __packed;
-/*
- * WMI_FW_VER_EVENTID
- */
+/* WMI_FW_VER_EVENTID */
struct wmi_fw_ver_event {
u8 major;
u8 minor;
@@ -1005,9 +949,7 @@ struct wmi_fw_ver_event {
__le16 build;
} __packed;
-/*
-* WMI_MAC_ADDR_RESP_EVENTID
-*/
+/* WMI_MAC_ADDR_RESP_EVENTID */
struct wmi_mac_addr_resp_event {
u8 mac[WMI_MAC_LEN];
u8 auth_mode;
@@ -1015,42 +957,38 @@ struct wmi_mac_addr_resp_event {
__le32 offload_mode;
} __packed;
-/*
-* WMI_EAPOL_RX_EVENTID
-*/
+/* WMI_EAPOL_RX_EVENTID */
struct wmi_eapol_rx_event {
u8 src_mac[WMI_MAC_LEN];
__le16 eapol_len;
u8 eapol[0];
} __packed;
-/*
-* WMI_READY_EVENTID
-*/
+/* WMI_READY_EVENTID */
enum wmi_phy_capability {
- WMI_11A_CAPABILITY = 1,
- WMI_11G_CAPABILITY = 2,
- WMI_11AG_CAPABILITY = 3,
- WMI_11NA_CAPABILITY = 4,
- WMI_11NG_CAPABILITY = 5,
- WMI_11NAG_CAPABILITY = 6,
- WMI_11AD_CAPABILITY = 7,
- WMI_11N_CAPABILITY_OFFSET = WMI_11NA_CAPABILITY - WMI_11A_CAPABILITY,
+ WMI_11A_CAPABILITY = 0x01,
+ WMI_11G_CAPABILITY = 0x02,
+ WMI_11AG_CAPABILITY = 0x03,
+ WMI_11NA_CAPABILITY = 0x04,
+ WMI_11NG_CAPABILITY = 0x05,
+ WMI_11NAG_CAPABILITY = 0x06,
+ WMI_11AD_CAPABILITY = 0x07,
+ WMI_11N_CAPABILITY_OFFSET = 0x03,
};
struct wmi_ready_event {
__le32 sw_version;
__le32 abi_version;
u8 mac[WMI_MAC_LEN];
- u8 phy_capability; /* enum wmi_phy_capability */
+ /* enum wmi_phy_capability */
+ u8 phy_capability;
u8 numof_additional_mids;
} __packed;
-/*
- * WMI_NOTIFY_REQ_DONE_EVENTID
- */
+/* WMI_NOTIFY_REQ_DONE_EVENTID */
struct wmi_notify_req_done_event {
- __le32 status; /* beamforming status, 0: fail; 1: OK; 2: retrying */
+ /* beamforming status, 0: fail; 1: OK; 2: retrying */
+ __le32 status;
__le64 tsf;
__le32 snr_val;
__le32 tx_tpt;
@@ -1066,9 +1004,7 @@ struct wmi_notify_req_done_event {
u8 reserved[3];
} __packed;
-/*
- * WMI_CONNECT_EVENTID
- */
+/* WMI_CONNECT_EVENTID */
struct wmi_connect_event {
u8 channel;
u8 reserved0;
@@ -1082,68 +1018,103 @@ struct wmi_connect_event {
u8 assoc_resp_len;
u8 cid;
u8 reserved2[3];
+ /* not in use */
u8 assoc_info[0];
} __packed;
-/*
- * WMI_DISCONNECT_EVENTID
- */
+/* WMI_DISCONNECT_EVENTID */
enum wmi_disconnect_reason {
- WMI_DIS_REASON_NO_NETWORK_AVAIL = 1,
- WMI_DIS_REASON_LOST_LINK = 2, /* bmiss */
- WMI_DIS_REASON_DISCONNECT_CMD = 3,
- WMI_DIS_REASON_BSS_DISCONNECTED = 4,
- WMI_DIS_REASON_AUTH_FAILED = 5,
- WMI_DIS_REASON_ASSOC_FAILED = 6,
- WMI_DIS_REASON_NO_RESOURCES_AVAIL = 7,
- WMI_DIS_REASON_CSERV_DISCONNECT = 8,
- WMI_DIS_REASON_INVALID_PROFILE = 10,
- WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 11,
- WMI_DIS_REASON_PROFILE_MISMATCH = 12,
- WMI_DIS_REASON_CONNECTION_EVICTED = 13,
- WMI_DIS_REASON_IBSS_MERGE = 14,
+ WMI_DIS_REASON_NO_NETWORK_AVAIL = 0x01,
+ /* bmiss */
+ WMI_DIS_REASON_LOST_LINK = 0x02,
+ WMI_DIS_REASON_DISCONNECT_CMD = 0x03,
+ WMI_DIS_REASON_BSS_DISCONNECTED = 0x04,
+ WMI_DIS_REASON_AUTH_FAILED = 0x05,
+ WMI_DIS_REASON_ASSOC_FAILED = 0x06,
+ WMI_DIS_REASON_NO_RESOURCES_AVAIL = 0x07,
+ WMI_DIS_REASON_CSERV_DISCONNECT = 0x08,
+ WMI_DIS_REASON_INVALID_PROFILE = 0x0A,
+ WMI_DIS_REASON_DOT11H_CHANNEL_SWITCH = 0x0B,
+ WMI_DIS_REASON_PROFILE_MISMATCH = 0x0C,
+ WMI_DIS_REASON_CONNECTION_EVICTED = 0x0D,
+ WMI_DIS_REASON_IBSS_MERGE = 0x0E,
};
struct wmi_disconnect_event {
- __le16 protocol_reason_status; /* reason code, see 802.11 spec. */
- u8 bssid[WMI_MAC_LEN]; /* set if known */
- u8 disconnect_reason; /* see wmi_disconnect_reason */
- u8 assoc_resp_len; /* not used */
- u8 assoc_info[0]; /* not used */
+ /* reason code, see 802.11 spec. */
+ __le16 protocol_reason_status;
+ /* set if known */
+ u8 bssid[WMI_MAC_LEN];
+ /* see enum wmi_disconnect_reason */
+ u8 disconnect_reason;
+	/* last assoc req may be passed to host - not in use */
+	u8 assoc_resp_len;
+	/* last assoc req may be passed to host - not in use */
+	u8 assoc_info[0];
} __packed;
-/*
- * WMI_SCAN_COMPLETE_EVENTID
- */
+/* WMI_SCAN_COMPLETE_EVENTID */
enum scan_status {
- WMI_SCAN_SUCCESS = 0,
- WMI_SCAN_FAILED = 1,
- WMI_SCAN_ABORTED = 2,
- WMI_SCAN_REJECTED = 3,
+ WMI_SCAN_SUCCESS = 0x00,
+ WMI_SCAN_FAILED = 0x01,
+ WMI_SCAN_ABORTED = 0x02,
+ WMI_SCAN_REJECTED = 0x03,
+ WMI_SCAN_ABORT_REJECTED = 0x04,
};
struct wmi_scan_complete_event {
- __le32 status; /* scan_status */
+ /* enum scan_status */
+ __le32 status;
} __packed;
-/*
- * WMI_BA_STATUS_EVENTID
- */
+/* WMI_ACS_PASSIVE_SCAN_COMPLETE_EVENTID */
+enum wmi_acs_info_bitmask {
+ WMI_ACS_INFO_BITMASK_BEACON_FOUND = 0x01,
+ WMI_ACS_INFO_BITMASK_BUSY_TIME = 0x02,
+ WMI_ACS_INFO_BITMASK_TX_TIME = 0x04,
+ WMI_ACS_INFO_BITMASK_RX_TIME = 0x08,
+ WMI_ACS_INFO_BITMASK_NOISE = 0x10,
+};
+
+struct scan_acs_info {
+ u8 channel;
+ u8 beacon_found;
+ /* msec */
+ __le16 busy_time;
+ __le16 tx_time;
+ __le16 rx_time;
+ u8 noise;
+ u8 reserved[3];
+} __packed;
+
+struct wmi_acs_passive_scan_complete_event {
+ __le32 dwell_time;
+	/* bitmask of valid fields within the channel info, per
+	 * their appearance in struct order
+	 */
+ __le16 filled;
+ u8 num_scanned_channels;
+ u8 reserved;
+ struct scan_acs_info scan_info_list[0];
+} __packed;
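
Consumers of this event should trust an optional per-channel field only when the matching wmi_acs_info_bitmask bit is set in 'filled'. A minimal walk over the trailing scan_info_list[] (sketch only):

/* Sketch: iterate the per-channel entries and honor the 'filled'
 * bitmask before reading optional fields.
 */
static void example_dump_acs(struct wmi_acs_passive_scan_complete_event *evt)
{
	u16 filled = le16_to_cpu(evt->filled);
	int i;

	for (i = 0; i < evt->num_scanned_channels; i++) {
		struct scan_acs_info *ch = &evt->scan_info_list[i];

		if (filled & WMI_ACS_INFO_BITMASK_BUSY_TIME)
			pr_debug("channel %d: busy %d ms\n", ch->channel,
				 le16_to_cpu(ch->busy_time));
	}
}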
+
+/* WMI_BA_STATUS_EVENTID */
enum wmi_vring_ba_status {
- WMI_BA_AGREED = 0,
- WMI_BA_NON_AGREED = 1,
+ WMI_BA_AGREED = 0x00,
+ WMI_BA_NON_AGREED = 0x01,
/* BA_EN in middle of teardown flow */
- WMI_BA_TD_WIP = 2,
+ WMI_BA_TD_WIP = 0x02,
/* BA_DIS or BA_EN in middle of BA SETUP flow */
- WMI_BA_SETUP_WIP = 3,
+ WMI_BA_SETUP_WIP = 0x03,
/* BA_EN when the BA session is already active */
- WMI_BA_SESSION_ACTIVE = 4,
+ WMI_BA_SESSION_ACTIVE = 0x04,
/* BA_DIS when the BA session is not active */
- WMI_BA_SESSION_NOT_ACTIVE = 5,
+ WMI_BA_SESSION_NOT_ACTIVE = 0x05,
};
-struct wmi_vring_ba_status_event {
- __le16 status; /* enum wmi_vring_ba_status */
+struct wmi_ba_status_event {
+ /* enum wmi_vring_ba_status */
+ __le16 status;
u8 reserved[2];
u8 ringid;
u8 agg_wsize;
@@ -1151,18 +1122,14 @@ struct wmi_vring_ba_status_event {
u8 amsdu;
} __packed;
-/*
- * WMI_DELBA_EVENTID
- */
+/* WMI_DELBA_EVENTID */
struct wmi_delba_event {
u8 cidxtid;
u8 from_initiator;
__le16 reason;
} __packed;
-/*
- * WMI_VRING_CFG_DONE_EVENTID
- */
+/* WMI_VRING_CFG_DONE_EVENTID */
struct wmi_vring_cfg_done_event {
u8 ringid;
u8 status;
@@ -1170,174 +1137,151 @@ struct wmi_vring_cfg_done_event {
__le32 tx_vring_tail_ptr;
} __packed;
-/*
- * WMI_RCP_ADDBA_RESP_SENT_EVENTID
- */
+/* WMI_RCP_ADDBA_RESP_SENT_EVENTID */
struct wmi_rcp_addba_resp_sent_event {
u8 cidxtid;
u8 reserved;
__le16 status;
} __packed;
-/*
- * WMI_RCP_ADDBA_REQ_EVENTID
- */
+/* WMI_RCP_ADDBA_REQ_EVENTID */
struct wmi_rcp_addba_req_event {
u8 cidxtid;
u8 dialog_token;
- __le16 ba_param_set; /* ieee80211_ba_parameterset as it received */
+	/* ieee80211_ba_parameterset as it was received */
+ __le16 ba_param_set;
__le16 ba_timeout;
- __le16 ba_seq_ctrl; /* ieee80211_ba_seqstrl field as it received */
+	/* ieee80211_ba_seqstrl field as it was received */
+ __le16 ba_seq_ctrl;
} __packed;
-/*
- * WMI_CFG_RX_CHAIN_DONE_EVENTID
- */
+/* WMI_CFG_RX_CHAIN_DONE_EVENTID */
enum wmi_cfg_rx_chain_done_event_status {
- WMI_CFG_RX_CHAIN_SUCCESS = 1,
+ WMI_CFG_RX_CHAIN_SUCCESS = 0x01,
};
struct wmi_cfg_rx_chain_done_event {
- __le32 rx_ring_tail_ptr; /* Rx V-Ring Tail pointer */
+ /* V-Ring Tail pointer */
+ __le32 rx_ring_tail_ptr;
__le32 status;
} __packed;
-/*
- * WMI_WBE_LINK_DOWN_EVENTID
- */
+/* WMI_WBE_LINK_DOWN_EVENTID */
enum wmi_wbe_link_down_event_reason {
- WMI_WBE_REASON_USER_REQUEST = 0,
- WMI_WBE_REASON_RX_DISASSOC = 1,
- WMI_WBE_REASON_BAD_PHY_LINK = 2,
+ WMI_WBE_REASON_USER_REQUEST = 0x00,
+ WMI_WBE_REASON_RX_DISASSOC = 0x01,
+ WMI_WBE_REASON_BAD_PHY_LINK = 0x02,
};
+/* WMI_WBE_LINK_DOWN_EVENTID */
struct wmi_wbe_link_down_event {
u8 cid;
u8 reserved[3];
__le32 reason;
} __packed;
-/*
- * WMI_DATA_PORT_OPEN_EVENTID
- */
+/* WMI_DATA_PORT_OPEN_EVENTID */
struct wmi_data_port_open_event {
u8 cid;
u8 reserved[3];
} __packed;
-/*
- * WMI_VRING_EN_EVENTID
- */
+/* WMI_VRING_EN_EVENTID */
struct wmi_vring_en_event {
u8 vring_index;
u8 reserved[3];
} __packed;
-/*
- * WMI_GET_PCP_CHANNEL_EVENTID
- */
+/* WMI_GET_PCP_CHANNEL_EVENTID */
struct wmi_get_pcp_channel_event {
u8 channel;
u8 reserved[3];
} __packed;
-/*
- * WMI_P2P_CFG_DONE_EVENTID
- */
+/* WMI_P2P_CFG_DONE_EVENTID */
struct wmi_p2p_cfg_done_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
-* WMI_PORT_ALLOCATED_EVENTID
-*/
+/* WMI_PORT_ALLOCATED_EVENTID */
struct wmi_port_allocated_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
-* WMI_PORT_DELETED_EVENTID
-*/
+/* WMI_PORT_DELETED_EVENTID */
struct wmi_port_deleted_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
- * WMI_LISTEN_STARTED_EVENTID
- */
+/* WMI_LISTEN_STARTED_EVENTID */
struct wmi_listen_started_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
- * WMI_SEARCH_STARTED_EVENTID
- */
+/* WMI_SEARCH_STARTED_EVENTID */
struct wmi_search_started_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
- * WMI_PCP_STARTED_EVENTID
- */
+/* WMI_PCP_STARTED_EVENTID */
struct wmi_pcp_started_event {
- u8 status; /* wmi_fw_status */
+ /* wmi_fw_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
- * WMI_PCP_FACTOR_EVENTID
- */
+/* WMI_PCP_FACTOR_EVENTID */
struct wmi_pcp_factor_event {
__le32 pcp_factor;
} __packed;
-/*
- * WMI_SW_TX_COMPLETE_EVENTID
- */
enum wmi_sw_tx_status {
- WMI_TX_SW_STATUS_SUCCESS = 0,
- WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 1,
- WMI_TX_SW_STATUS_FAILED_TX = 2,
+ WMI_TX_SW_STATUS_SUCCESS = 0x00,
+ WMI_TX_SW_STATUS_FAILED_NO_RESOURCES = 0x01,
+ WMI_TX_SW_STATUS_FAILED_TX = 0x02,
};
+/* WMI_SW_TX_COMPLETE_EVENTID */
struct wmi_sw_tx_complete_event {
- u8 status; /* enum wmi_sw_tx_status */
+ /* enum wmi_sw_tx_status */
+ u8 status;
u8 reserved[3];
} __packed;
-/*
- * WMI_CORR_MEASURE_EVENTID
- */
+/* WMI_CORR_MEASURE_EVENTID */
struct wmi_corr_measure_event {
- s32 i;
- s32 q;
- s32 image_i;
- s32 image_q;
+ /* signed */
+ __le32 i;
+ /* signed */
+ __le32 q;
+ /* signed */
+ __le32 image_i;
+ /* signed */
+ __le32 image_q;
} __packed;
-/*
- * WMI_READ_RSSI_EVENTID
- */
+/* WMI_READ_RSSI_EVENTID */
struct wmi_read_rssi_event {
__le32 ina_rssi_adc_dbm;
} __packed;
-/*
- * WMI_GET_SSID_EVENTID
- */
+/* WMI_GET_SSID_EVENTID */
struct wmi_get_ssid_event {
__le32 ssid_len;
u8 ssid[WMI_MAX_SSID_LEN];
} __packed;
-/*
- * WMI_RX_MGMT_PACKET_EVENTID
- */
+/* wmi_rx_mgmt_info */
struct wmi_rx_mgmt_info {
u8 mcs;
s8 snr;
@@ -1346,39 +1290,124 @@ struct wmi_rx_mgmt_info {
__le16 stype;
__le16 status;
__le32 len;
+ /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
u8 qid;
+ /* Not resolved when == 0xFFFFFFFF ==> Broadcast to all MIDS */
u8 mid;
u8 cid;
- u8 channel; /* From Radio MNGR */
+ /* From Radio MNGR */
+ u8 channel;
} __packed;
-/*
- * WMI_TX_MGMT_PACKET_EVENTID
- */
+/* wmi_otp_read_write_cmd */
+struct wmi_otp_read_write_cmd {
+ __le32 addr;
+ __le32 size;
+ u8 values[0];
+} __packed;
+
+/* WMI_OTP_READ_RESULT_EVENTID */
+struct wmi_otp_read_result_event {
+ u8 payload[0];
+} __packed;
+
+/* WMI_TX_MGMT_PACKET_EVENTID */
struct wmi_tx_mgmt_packet_event {
u8 payload[0];
} __packed;
+/* WMI_RX_MGMT_PACKET_EVENTID */
struct wmi_rx_mgmt_packet_event {
struct wmi_rx_mgmt_info info;
u8 payload[0];
} __packed;
-/*
- * WMI_ECHO_RSP_EVENTID
- */
-struct wmi_echo_event {
+/* WMI_ECHO_RSP_EVENTID */
+struct wmi_echo_rsp_event {
__le32 echoed_value;
} __packed;
-/*
- * WMI_TEMP_SENSE_DONE_EVENTID
+/* WMI_TEMP_SENSE_DONE_EVENTID
*
* Measure MAC and radio temperatures
*/
struct wmi_temp_sense_done_event {
+	/* Temperature times 1000 (divide the value by 1000 to obtain
+	 * the actual temperature)
+	 */
	__le32 baseband_t1000;
+	/* Temperature times 1000 (divide the value by 1000 to obtain
+	 * the actual temperature)
+	 */
__le32 rf_t1000;
} __packed;
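
Since both fields carry the temperature multiplied by 1000, integer division recovers whole degrees and the remainder gives millidegrees. A conversion sketch (the print format is illustrative):

/* Sketch: convert a *_t1000 reading to degrees.millidegrees. */
static void example_report_temp(struct wmi_temp_sense_done_event *evt)
{
	s32 t = (s32)le32_to_cpu(evt->baseband_t1000);

	pr_info("baseband temperature: %d.%03d C\n",
		t / 1000, abs(t % 1000));
}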
+#define WMI_SCAN_DWELL_TIME_MS (100)
+#define WMI_SURVEY_TIMEOUT_MS (10000)
+
+enum wmi_hidden_ssid {
+ WMI_HIDDEN_SSID_DISABLED = 0x00,
+ WMI_HIDDEN_SSID_SEND_EMPTY = 0x10,
+ WMI_HIDDEN_SSID_CLEAR = 0xFE,
+};
+
+/* WMI_LED_CFG_CMDID
+ *
+ * Configure LED On/Off/Blinking operation
+ *
+ * Returned events:
+ * - WMI_LED_CFG_DONE_EVENTID
+ */
+enum led_mode {
+ LED_DISABLE = 0x00,
+ LED_ENABLE = 0x01,
+};
+
+/* The names of the LEDs as
+ * described in the HW schematics.
+ */
+enum wmi_led_id {
+ WMI_LED_WLAN = 0x00,
+ WMI_LED_WPAN = 0x01,
+ WMI_LED_WWAN = 0x02,
+};
+
+/* LED polarity mode. */
+enum wmi_led_polarity {
+ LED_POLARITY_HIGH_ACTIVE = 0x00,
+ LED_POLARITY_LOW_ACTIVE = 0x01,
+};
+
+/* The on and off durations together
+ * define the blink period
+ */
+struct wmi_led_blink_mode {
+ __le32 blink_on;
+ __le32 blink_off;
+} __packed;
+
+/* WMI_LED_CFG_CMDID */
+struct wmi_led_cfg_cmd {
+	/* enum led_mode */
+	u8 led_mode;
+	/* enum wmi_led_id */
+	u8 id;
+ /* slow speed blinking combination */
+ struct wmi_led_blink_mode slow_blink_cfg;
+ /* medium speed blinking combination */
+ struct wmi_led_blink_mode medium_blink_cfg;
+ /* high speed blinking combination */
+ struct wmi_led_blink_mode fast_blink_cfg;
+ /* polarity of the led */
+ u8 led_polarity;
+ /* reserved */
+ u8 reserved;
+} __packed;
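
Each wmi_led_blink_mode pair defines one blink rate: the period is blink_on plus blink_off. A sketch of filling the command; the numeric values are arbitrary placeholders and the units are firmware-defined:

/* Sketch: populate a WMI_LED_CFG_CMDID payload with three blink
 * rates. Values are placeholders, not firmware-validated settings.
 */
static void example_fill_led_cfg(struct wmi_led_cfg_cmd *cmd)
{
	cmd->led_mode = LED_ENABLE;
	cmd->id = WMI_LED_WLAN;
	cmd->led_polarity = LED_POLARITY_HIGH_ACTIVE;
	cmd->slow_blink_cfg.blink_on = cpu_to_le32(500);
	cmd->slow_blink_cfg.blink_off = cpu_to_le32(500);
	cmd->medium_blink_cfg.blink_on = cpu_to_le32(250);
	cmd->medium_blink_cfg.blink_off = cpu_to_le32(250);
	cmd->fast_blink_cfg.blink_on = cpu_to_le32(100);
	cmd->fast_blink_cfg.blink_off = cpu_to_le32(100);
}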
+
+/* WMI_LED_CFG_DONE_EVENTID */
+struct wmi_led_cfg_done_event {
+ /* led config status */
+ __le32 status;
+} __packed;
+
#endif /* __WILOCITY_WMI_H__ */
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index bb1ed0ebd..488c08c30 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -1540,7 +1540,7 @@ static inline int at76_guess_freq(struct at76_priv *priv)
channel = el[2];
exit:
- return ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+ return ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
}
static void at76_rx_tasklet(unsigned long param)
@@ -1583,7 +1583,7 @@ static void at76_rx_tasklet(unsigned long param)
rx_status.signal = buf->rssi;
rx_status.flag |= RX_FLAG_DECRYPTED;
rx_status.flag |= RX_FLAG_IV_STRIPPED;
- rx_status.band = IEEE80211_BAND_2GHZ;
+ rx_status.band = NL80211_BAND_2GHZ;
rx_status.freq = at76_guess_freq(priv);
at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
@@ -2352,7 +2352,7 @@ static int at76_init_new_device(struct at76_priv *priv,
priv->hw->wiphy->max_scan_ssids = 1;
priv->hw->wiphy->max_scan_ie_len = 0;
priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &at76_supported_band;
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = &at76_supported_band;
ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC);
priv->hw->max_signal = 100;
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index ae6dc6fac..b6107aea6 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -2260,7 +2260,7 @@ static int atmel_set_freq(struct net_device *dev,
fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
- if ((fwrq->m > 1000) || (fwrq->e > 0))
+ if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
rc = -EOPNOTSUPP;
else {
int channel = fwrq->m;
@@ -2419,7 +2419,7 @@ static int atmel_get_range(struct net_device *dev,
/* Values in MHz -> * 10^5 * 10 */
range->freq[k].m = 100000 *
- ieee80211_channel_to_frequency(i, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(i, NL80211_BAND_2GHZ);
range->freq[k++].e = 1;
}
range->num_frequency = k;
diff --git a/drivers/net/wireless/broadcom/b43/b43.h b/drivers/net/wireless/broadcom/b43/b43.h
index 036552439..d7d42f0b8 100644
--- a/drivers/net/wireless/broadcom/b43/b43.h
+++ b/drivers/net/wireless/broadcom/b43/b43.h
@@ -992,9 +992,9 @@ static inline int b43_is_mode(struct b43_wl *wl, int type)
/**
* b43_current_band - Returns the currently used band.
- * Returns one of IEEE80211_BAND_2GHZ and IEEE80211_BAND_5GHZ.
+ * Returns one of NL80211_BAND_2GHZ and NL80211_BAND_5GHZ.
*/
-static inline enum ieee80211_band b43_current_band(struct b43_wl *wl)
+static inline enum nl80211_band b43_current_band(struct b43_wl *wl)
{
return wl->hw->conf.chandef.chan->band;
}
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index 97774b338..f8c7aa9db 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -181,7 +181,7 @@ static struct ieee80211_rate __b43_ratetable[] = {
#define b43_g_ratetable_size 12
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -210,7 +210,7 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
#undef CHAN2G
#define CHAN4G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 4000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -218,7 +218,7 @@ static struct ieee80211_channel b43_2ghz_chantable[] = {
.max_power = 30, \
}
#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -317,7 +317,7 @@ static struct ieee80211_channel b43_5ghz_aphy_chantable[] = {
#undef CHAN5G
static struct ieee80211_supported_band b43_band_5GHz_nphy = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.channels = b43_5ghz_nphy_chantable,
.n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable),
.bitrates = b43_a_ratetable,
@@ -325,7 +325,7 @@ static struct ieee80211_supported_band b43_band_5GHz_nphy = {
};
static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.channels = b43_5ghz_nphy_chantable_limited,
.n_channels = ARRAY_SIZE(b43_5ghz_nphy_chantable_limited),
.bitrates = b43_a_ratetable,
@@ -333,7 +333,7 @@ static struct ieee80211_supported_band b43_band_5GHz_nphy_limited = {
};
static struct ieee80211_supported_band b43_band_5GHz_aphy = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.channels = b43_5ghz_aphy_chantable,
.n_channels = ARRAY_SIZE(b43_5ghz_aphy_chantable),
.bitrates = b43_a_ratetable,
@@ -341,7 +341,7 @@ static struct ieee80211_supported_band b43_band_5GHz_aphy = {
};
static struct ieee80211_supported_band b43_band_2GHz = {
- .band = IEEE80211_BAND_2GHZ,
+ .band = NL80211_BAND_2GHZ,
.channels = b43_2ghz_chantable,
.n_channels = ARRAY_SIZE(b43_2ghz_chantable),
.bitrates = b43_g_ratetable,
@@ -349,7 +349,7 @@ static struct ieee80211_supported_band b43_band_2GHz = {
};
static struct ieee80211_supported_band b43_band_2ghz_limited = {
- .band = IEEE80211_BAND_2GHZ,
+ .band = NL80211_BAND_2GHZ,
.channels = b43_2ghz_chantable,
.n_channels = b43_2ghz_chantable_limited_size,
.bitrates = b43_g_ratetable,
@@ -711,7 +711,7 @@ static void b43_set_slot_time(struct b43_wldev *dev, u16 slot_time)
{
/* slot_time is in usec. */
/* This test used to exit for all but a G PHY. */
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
return;
b43_write16(dev, B43_MMIO_IFSSLOT, 510 + slot_time);
/* Shared memory location 0x0010 is the slot time and should be
@@ -3876,12 +3876,12 @@ static void b43_op_set_tsf(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
-static const char *band_to_string(enum ieee80211_band band)
+static const char *band_to_string(enum nl80211_band band)
{
switch (band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
return "5";
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
return "2.4";
default:
break;
@@ -3899,10 +3899,10 @@ static int b43_switch_band(struct b43_wldev *dev,
u32 tmp;
switch (chan->band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
gmode = false;
break;
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
gmode = true;
break;
default:
@@ -5290,16 +5290,16 @@ static int b43_setup_bands(struct b43_wldev *dev,
phy->radio_rev == 9;
if (have_2ghz_phy)
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = limited_2g ?
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = limited_2g ?
&b43_band_2ghz_limited : &b43_band_2GHz;
if (dev->phy.type == B43_PHYTYPE_N) {
if (have_5ghz_phy)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = limited_5g ?
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = limited_5g ?
&b43_band_5GHz_nphy_limited :
&b43_band_5GHz_nphy;
} else {
if (have_5ghz_phy)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &b43_band_5GHz_aphy;
}
dev->phy.supports_2ghz = have_2ghz_phy;
diff --git a/drivers/net/wireless/broadcom/b43/phy_ac.c b/drivers/net/wireless/broadcom/b43/phy_ac.c
index e75633d67..52f8abad8 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ac.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ac.c
@@ -61,7 +61,7 @@ static void b43_phy_ac_op_radio_write(struct b43_wldev *dev, u16 reg,
static unsigned int b43_phy_ac_op_get_default_chan(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
return 11;
return 36;
}
diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c
index ec2b9c577..85f2ca989 100644
--- a/drivers/net/wireless/broadcom/b43/phy_common.c
+++ b/drivers/net/wireless/broadcom/b43/phy_common.c
@@ -436,7 +436,7 @@ int b43_switch_channel(struct b43_wldev *dev, unsigned int new_channel)
* firmware from sending ghost packets.
*/
channelcookie = new_channel;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
channelcookie |= B43_SHM_SH_CHAN_5GHZ;
/* FIXME: set 40Mhz flag if required */
if (0)
diff --git a/drivers/net/wireless/broadcom/b43/phy_ht.c b/drivers/net/wireless/broadcom/b43/phy_ht.c
index bd6894596..718c90e81 100644
--- a/drivers/net/wireless/broadcom/b43/phy_ht.c
+++ b/drivers/net/wireless/broadcom/b43/phy_ht.c
@@ -568,7 +568,7 @@ static void b43_phy_ht_tx_power_ctl(struct b43_wldev *dev, bool enable)
} else {
b43_phy_set(dev, B43_PHY_HT_TXPCTL_CMD_C1, en_bits);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
for (i = 0; i < 3; i++)
b43_phy_write(dev, cmd_regs[i], 0x32);
}
@@ -643,7 +643,7 @@ static void b43_phy_ht_tx_power_ctl_setup(struct b43_wldev *dev)
u16 freq = dev->phy.chandef->chan->center_freq;
int i, c;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
for (c = 0; c < 3; c++) {
target[c] = sprom->core_pwr_info[c].maxpwr_2g;
a1[c] = sprom->core_pwr_info[c].pa_2g[0];
@@ -777,7 +777,7 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
const struct b43_phy_ht_channeltab_e_phy *e,
struct ieee80211_channel *new_channel)
{
- if (new_channel->band == IEEE80211_BAND_5GHZ) {
+ if (new_channel->band == NL80211_BAND_5GHZ) {
/* Switch to 2 GHz for a moment to access B-PHY regs */
b43_phy_mask(dev, B43_PHY_HT_BANDCTL, ~B43_PHY_HT_BANDCTL_5GHZ);
@@ -805,7 +805,7 @@ static void b43_phy_ht_channel_setup(struct b43_wldev *dev,
} else {
b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_OFDM_EN,
B43_PHY_HT_CLASS_CTL_OFDM_EN);
- if (new_channel->band == IEEE80211_BAND_2GHZ)
+ if (new_channel->band == NL80211_BAND_2GHZ)
b43_phy_mask(dev, B43_PHY_HT_TEST, ~0x840);
}
@@ -916,7 +916,7 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
if (0) /* TODO: condition */
; /* TODO: PHY op on reg 0x217 */
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
else
b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN,
@@ -1005,7 +1005,7 @@ static int b43_phy_ht_op_init(struct b43_wldev *dev)
b43_phy_ht_classifier(dev, 0, 0);
b43_phy_ht_read_clip_detection(dev, clip_state);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_phy_ht_bphy_init(dev);
b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0),
@@ -1077,7 +1077,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
enum nl80211_channel_type channel_type =
cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if ((new_channel < 1) || (new_channel > 14))
return -EINVAL;
} else {
@@ -1089,7 +1089,7 @@ static int b43_phy_ht_op_switch_channel(struct b43_wldev *dev,
static unsigned int b43_phy_ht_op_get_default_chan(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
return 11;
return 36;
}
diff --git a/drivers/net/wireless/broadcom/b43/phy_lcn.c b/drivers/net/wireless/broadcom/b43/phy_lcn.c
index 97461ccf3..63bd29f07 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lcn.c
@@ -108,7 +108,7 @@ static void b43_radio_2064_channel_setup(struct b43_wldev *dev)
/* wlc_radio_2064_init */
static void b43_radio_2064_init(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, 0x09c, 0x0020);
b43_radio_write(dev, 0x105, 0x0008);
} else {
@@ -535,7 +535,7 @@ static void b43_phy_lcn_tx_pwr_ctl_init(struct b43_wldev *dev)
b43_mac_suspend(dev);
if (!dev->phy.lcn->hw_pwr_ctl_capable) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
tx_gains.gm_gain = 4;
tx_gains.pga_gain = 12;
tx_gains.pad_gain = 12;
@@ -720,7 +720,7 @@ static int b43_phy_lcn_op_init(struct b43_wldev *dev)
else
B43_WARN_ON(1);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_phy_lcn_tx_pwr_ctl_init(dev);
b43_switch_channel(dev, dev->phy.channel);
@@ -779,7 +779,7 @@ static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
enum nl80211_channel_type channel_type =
cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if ((new_channel < 1) || (new_channel > 14))
return -EINVAL;
} else {
@@ -791,7 +791,7 @@ static int b43_phy_lcn_op_switch_channel(struct b43_wldev *dev,
static unsigned int b43_phy_lcn_op_get_default_chan(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
return 1;
return 36;
}
diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
index 058a9f232..6922cbb99 100644
--- a/drivers/net/wireless/broadcom/b43/phy_lp.c
+++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
@@ -46,7 +46,7 @@ static inline u16 channel2freq_lp(u8 channel)
static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
return 1;
return 36;
}
@@ -91,7 +91,7 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
u32 ofdmpo;
int i;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
lpphy->tx_isolation_med_band = sprom->tri2g;
lpphy->bx_arch = sprom->bxa2g;
lpphy->rx_pwr_offset = sprom->rxpo2g;
@@ -174,7 +174,7 @@ static void lpphy_adjust_gain_table(struct b43_wldev *dev, u32 freq)
B43_WARN_ON(dev->phy.rev >= 2);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
isolation = lpphy->tx_isolation_med_band;
else if (freq <= 5320)
isolation = lpphy->tx_isolation_low_band;
@@ -238,7 +238,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB,
0xFF00, lpphy->rx_pwr_offset);
if ((sprom->boardflags_lo & B43_BFL_FEM) &&
- ((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+ ((b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ||
(sprom->boardflags_hi & B43_BFH_PAREF))) {
ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28);
ssb_pmu_set_ldo_paref(&bus->chipco, true);
@@ -280,7 +280,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xC0FF, 0x0900);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A);
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00);
- } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ ||
+ } else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ ||
(dev->dev->board_type == SSB_BOARD_BU4312) ||
(dev->phy.rev == 0 && (sprom->boardflags_lo & B43_BFL_FEM))) {
b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001);
@@ -326,7 +326,7 @@ static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
//FIXME the Broadcom driver caches & delays this HF write!
b43_hf_write(dev, b43_hf_read(dev) | B43_HF_PR45960W);
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x8000);
b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0040);
b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0xA400);
@@ -466,7 +466,7 @@ static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40);
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40);
b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00);
b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6);
@@ -547,7 +547,7 @@ static void lpphy_2062_init(struct b43_wldev *dev)
b43_radio_write(dev, B2062_S_BG_CTL1,
(b43_radio_read(dev, B2062_N_COMM2) >> 1) | 0x80);
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_radio_set(dev, B2062_N_TSSI_CTL0, 0x1);
else
b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1);
@@ -746,7 +746,7 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
lpphy->crs_sys_disable = false;
if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL,
0xFF1F, 0x60);
else
@@ -807,7 +807,7 @@ static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
if (dev->phy.rev >= 2) {
b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
}
@@ -823,7 +823,7 @@ static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
if (dev->phy.rev >= 2) {
b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
}
@@ -951,7 +951,7 @@ static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain)
0xFBFF, ext_lna << 10);
b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain);
b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF0, high_gain);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
tmp = (gain >> 2) & 0x3;
b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
0xE7FF, tmp<<11);
@@ -1344,7 +1344,7 @@ static void lpphy_calibrate_rc(struct b43_wldev *dev)
if (dev->phy.rev >= 2) {
lpphy_rev2plus_rc_calib(dev);
} else if (!lpphy->rc_cap) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
lpphy_rev0_1_rc_calib(dev);
} else {
lpphy_set_rc_cap(dev);
@@ -1548,7 +1548,7 @@ static void lpphy_tx_pctl_init_sw(struct b43_wldev *dev)
{
struct lpphy_tx_gains gains;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
gains.gm = 4;
gains.pad = 12;
gains.pga = 12;
@@ -1902,7 +1902,7 @@ static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
lpphy_set_trsw_over(dev, tx, rx);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
0xFFF7, pa << 3);
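
[annotator's note] As in the LCN PHY above, the LP PHY's default channel depends only on the active band: channel 1 on 2.4 GHz, channel 36 on 5 GHz. A compilable stand-alone sketch of that selection, with a reduced band enum:

enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ };

static unsigned int default_channel(enum nl80211_band band)
{
	/* Channel 1 (2412 MHz) on 2.4 GHz, channel 36 (5180 MHz) on 5 GHz. */
	return band == NL80211_BAND_2GHZ ? 1 : 36;
}

int main(void)
{
	return default_channel(NL80211_BAND_2GHZ) == 1 &&
	       default_channel(NL80211_BAND_5GHZ) == 36 ? 0 : 1;
}
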
diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
index 9f0bcf3b8..a5557d706 100644
--- a/drivers/net/wireless/broadcom/b43/phy_n.c
+++ b/drivers/net/wireless/broadcom/b43/phy_n.c
@@ -105,9 +105,9 @@ enum n_rail_type {
static inline bool b43_nphy_ipa(struct b43_wldev *dev)
{
- enum ieee80211_band band = b43_current_band(dev->wl);
- return ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
- (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ));
+ enum nl80211_band band = b43_current_band(dev->wl);
+ return ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
+ (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ));
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCoreGetState */
@@ -357,7 +357,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
break;
case N_INTC_OVERRIDE_PA:
tmp = 0x0030;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
val = value << 5;
else
val = value << 4;
@@ -365,7 +365,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
b43_phy_set(dev, reg, 0x1000);
break;
case N_INTC_OVERRIDE_EXT_LNA_PU:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
tmp = 0x0001;
tmp2 = 0x0004;
val = value;
@@ -378,7 +378,7 @@ static void b43_nphy_rf_ctl_intc_override_rev7(struct b43_wldev *dev,
b43_phy_mask(dev, reg, ~tmp2);
break;
case N_INTC_OVERRIDE_EXT_LNA_GAIN:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
tmp = 0x0002;
tmp2 = 0x0008;
val = value << 1;
@@ -465,7 +465,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
}
break;
case N_INTC_OVERRIDE_PA:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
tmp = 0x0020;
val = value << 5;
} else {
@@ -475,7 +475,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
b43_phy_maskset(dev, reg, ~tmp, val);
break;
case N_INTC_OVERRIDE_EXT_LNA_PU:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
tmp = 0x0001;
val = value;
} else {
@@ -485,7 +485,7 @@ static void b43_nphy_rf_ctl_intc_override(struct b43_wldev *dev,
b43_phy_maskset(dev, reg, ~tmp, val);
break;
case N_INTC_OVERRIDE_EXT_LNA_GAIN:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
tmp = 0x0002;
val = value << 1;
} else {
@@ -600,7 +600,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
b43_nphy_stay_in_carrier_search(dev, 1);
if (nphy->gain_boost) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
gain[0] = 6;
gain[1] = 6;
} else {
@@ -736,7 +736,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
switch (phy->radio_rev) {
case 0 ... 4:
case 6:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_R1, 0x3f);
b43_radio_write(dev, R2057_CP_KPD_IDAC, 0x3f);
b43_radio_write(dev, R2057_RFPLL_LOOPFILTER_C1, 0x8);
@@ -751,7 +751,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
case 9: /* e.g. PHY rev 16 */
b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x20);
b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x18);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_radio_write(dev, R2057_LOGEN_PTAT_RESETS, 0x38);
b43_radio_write(dev, R2057_VCOBUF_IDACS, 0x0f);
@@ -775,7 +775,7 @@ static void b43_radio_2057_setup(struct b43_wldev *dev,
break;
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
u16 txmix2g_tune_boost_pu = 0;
u16 pad2g_tune_pus = 0;
@@ -1135,7 +1135,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
{
struct b43_phy *phy = &dev->phy;
struct ssb_sprom *sprom = dev->dev->bus_sprom;
- enum ieee80211_band band = b43_current_band(dev->wl);
+ enum nl80211_band band = b43_current_band(dev->wl);
u16 offset;
u8 i;
u16 bias, cbias;
@@ -1152,10 +1152,10 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
dev->dev->chip_pkg == BCMA_PKG_ID_BCM43224_FAB_SMIC);
b43_chantab_radio_2056_upload(dev, e);
- b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
+ b2056_upload_syn_pll_cp2(dev, band == NL80211_BAND_5GHZ);
if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
if (dev->dev->chip_id == BCMA_CHIP_ID_BCM4716 ||
@@ -1168,21 +1168,21 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
}
}
if (sprom->boardflags2_hi & B43_BFH2_GPLL_WAR2 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1f);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1f);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0b);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x20);
}
if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
}
- if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
+ if (dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) {
for (i = 0; i < 2; i++) {
offset = i ? B2056_TX1 : B2056_TX0;
if (dev->phy.rev >= 5) {
@@ -1244,7 +1244,7 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
}
b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
}
- } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
+ } else if (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ) {
u16 freq = phy->chandef->chan->center_freq;
if (freq < 5100) {
paa_boost = 0xA;
@@ -1501,7 +1501,7 @@ static void b43_radio_init2055(struct b43_wldev *dev)
/* Follow wl, not specs. Do not force uploading all regs */
b2055_upload_inittab(dev, 0, 0);
} else {
- bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ;
+ bool ghz5 = b43_current_band(dev->wl) == NL80211_BAND_5GHZ;
b2055_upload_inittab(dev, ghz5, 0);
}
b43_radio_init2055_post(dev);
@@ -1785,7 +1785,7 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code,
b43_phy_maskset(dev, reg, 0xFFC3, 0);
if (rssi_type == N_RSSI_W1)
- val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8;
+ val = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 4 : 8;
else if (rssi_type == N_RSSI_W2)
val = 16;
else
@@ -1813,12 +1813,12 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code,
if (rssi_type != N_RSSI_IQ &&
rssi_type != N_RSSI_TBD) {
- enum ieee80211_band band =
+ enum nl80211_band band =
b43_current_band(dev->wl);
if (dev->phy.rev < 7) {
if (b43_nphy_ipa(dev))
- val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+ val = (band == NL80211_BAND_5GHZ) ? 0xC : 0xE;
else
val = 0x11;
reg = (i == 0) ? B2056_TX0 : B2056_TX1;
@@ -2120,7 +2120,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
1, 0, false);
b43_nphy_rf_ctl_override_rev7(dev, 0x80, 1, 0, false, 0);
b43_nphy_rf_ctl_override_rev7(dev, 0x40, 1, 0, false, 0);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_nphy_rf_ctl_override_rev7(dev, 0x20, 0, 0, false,
0);
b43_nphy_rf_ctl_override_rev7(dev, 0x10, 1, 0, false,
@@ -2136,7 +2136,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_nphy_rf_ctl_override(dev, 0x2, 1, 0, false);
b43_nphy_rf_ctl_override(dev, 0x80, 1, 0, false);
b43_nphy_rf_ctl_override(dev, 0x40, 1, 0, false);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_nphy_rf_ctl_override(dev, 0x20, 0, 0, false);
b43_nphy_rf_ctl_override(dev, 0x10, 1, 0, false);
} else {
@@ -2257,7 +2257,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
b43_phy_write(dev, regs_to_store[i], saved_regs_phy[i]);
/* Store for future configuration */
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
} else {
@@ -2289,7 +2289,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
rssical_phy_regs[11] = b43_phy_read(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y);
/* Remember for which channel we store configuration */
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
nphy->rssical_chanspec_2G.center_freq = phy->chandef->chan->center_freq;
else
nphy->rssical_chanspec_5G.center_freq = phy->chandef->chan->center_freq;
@@ -2336,7 +2336,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, enum n_rssi_type type)
b43_nphy_read_clip_detection(dev, clip_state);
b43_nphy_write_clip_detection(dev, clip_off);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
override = 0x140;
else
override = 0x110;
@@ -2629,7 +2629,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
if (nphy->gain_boost) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ &&
b43_is_40mhz(dev))
code = 4;
else
@@ -2688,7 +2688,7 @@ static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
}
@@ -2803,7 +2803,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
scap_val = b43_radio_read(dev, R2057_RCCAL_SCAP_VAL);
if (b43_nphy_ipa(dev)) {
- bool ghz2 = b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ;
+ bool ghz2 = b43_current_band(dev->wl) == NL80211_BAND_2GHZ;
switch (phy->radio_rev) {
case 5:
@@ -2831,7 +2831,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
bcap_val_11b[core] = bcap_val;
lpf_ofdm_20mhz[core] = 4;
lpf_11b[core] = 1;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
scap_val_11n_20[core] = 0xc;
bcap_val_11n_20[core] = 0xc;
scap_val_11n_40[core] = 0xa;
@@ -2982,7 +2982,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
conv = 0x7f;
filt = 0xee;
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
for (core = 0; core < 2; core++) {
if (core == 0) {
b43_radio_write(dev, 0x5F, bias);
@@ -2998,7 +2998,7 @@ static void b43_nphy_workarounds_rev7plus(struct b43_wldev *dev)
}
if (b43_nphy_ipa(dev)) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (phy->radio_rev == 3 || phy->radio_rev == 4 ||
phy->radio_rev == 6) {
for (core = 0; core < 2; core++) {
@@ -3221,7 +3221,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
ARRAY_SIZE(rx2tx_events));
}
- tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+ tmp16 = (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ?
0x2 : 0x9C40;
b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
@@ -3240,7 +3240,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
pdet_range = sprom->fem.ghz2.pdet_range;
else
pdet_range = sprom->fem.ghz5.pdet_range;
@@ -3249,7 +3249,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
switch (pdet_range) {
case 3:
if (!(dev->phy.rev >= 4 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
break;
/* FALL THROUGH */
case 0:
@@ -3261,7 +3261,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
break;
case 2:
if (dev->phy.rev >= 6) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
vmid[3] = 0x94;
else
vmid[3] = 0x8e;
@@ -3277,7 +3277,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
break;
case 4:
case 5:
- if (b43_current_band(dev->wl) != IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) != NL80211_BAND_2GHZ) {
if (pdet_range == 4) {
vmid[3] = 0x8e;
tmp16 = 0x96;
@@ -3322,9 +3322,9 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
/* N PHY WAR TX Chain Update with hw_phytxchain as argument */
if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+ b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ||
(sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ b43_current_band(dev->wl) == NL80211_BAND_2GHZ))
tmp32 = 0x00088888;
else
tmp32 = 0x88888888;
@@ -3333,7 +3333,7 @@ static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
if (dev->phy.rev == 4 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
0x70);
b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
@@ -3376,7 +3376,7 @@ static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
delays1[5] = 0x14;
}
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ &&
nphy->band5g_pwrgain) {
b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
@@ -3451,7 +3451,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = phy->n;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
b43_nphy_classifier(dev, 1, 0);
else
b43_nphy_classifier(dev, 1, 1);
@@ -3586,7 +3586,7 @@ static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
gain = (target.pad[core]) | (target.pga[core] << 4) |
(target.txgm[core] << 8);
- indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+ indx = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ?
1 : 0;
for (i = 0; i < 9; i++)
if (tbl_iqcal_gainparams[indx][i][0] == gain)
@@ -3614,7 +3614,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
struct b43_phy_n *nphy = dev->phy.n;
u8 i;
u16 bmask, val, tmp;
- enum ieee80211_band band = b43_current_band(dev->wl);
+ enum nl80211_band band = b43_current_band(dev->wl);
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
@@ -3679,7 +3679,7 @@ static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
}
b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
- if (band == IEEE80211_BAND_5GHZ) {
+ if (band == NL80211_BAND_5GHZ) {
if (phy->rev >= 19) {
/* TODO */
} else if (phy->rev >= 7) {
@@ -3770,7 +3770,7 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
txpi[0] = 72;
txpi[1] = 72;
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
txpi[0] = sprom->txpid2g[0];
txpi[1] = sprom->txpid2g[1];
} else if (freq >= 4900 && freq < 5100) {
@@ -3868,7 +3868,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
} else if (phy->rev >= 7) {
for (core = 0; core < 2; core++) {
r = core ? 0x190 : 0x170;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, r + 0x5, 0x5);
b43_radio_write(dev, r + 0x9, 0xE);
if (phy->rev != 5)
@@ -3892,7 +3892,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
b43_radio_write(dev, r + 0xC, 0);
}
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
else
b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
@@ -3909,7 +3909,7 @@ static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
0x5);
if (phy->rev != 5)
@@ -4098,7 +4098,7 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
b0[0] = b0[1] = 5612;
b1[0] = b1[1] = -1393;
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
for (c = 0; c < 2; c++) {
idle[c] = nphy->pwr_ctl_info[c].idle_tssi_2g;
target[c] = sprom->core_pwr_info[c].maxpwr_2g;
@@ -4153,11 +4153,11 @@ static void b43_nphy_tx_power_ctl_setup(struct b43_wldev *dev)
for (c = 0; c < 2; c++) {
r = c ? 0x190 : 0x170;
if (b43_nphy_ipa(dev))
- b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 0xE : 0xC);
+ b43_radio_write(dev, r + 0x9, (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) ? 0xE : 0xC);
}
} else {
if (b43_nphy_ipa(dev)) {
- tmp = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+ tmp = (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) ? 0xC : 0xE;
b43_radio_write(dev,
B2056_TX0 | B2056_TX_TX_SSI_MUX, tmp);
b43_radio_write(dev,
@@ -4267,13 +4267,13 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
} else if (phy->rev >= 7) {
pga_gain = (table[i] >> 24) & 0xf;
pad_gain = (table[i] >> 19) & 0x1f;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
rfpwr_offset = rf_pwr_offset_table[pad_gain];
else
rfpwr_offset = rf_pwr_offset_table[pga_gain];
} else {
pga_gain = (table[i] >> 24) & 0xF;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
else
rfpwr_offset = 0; /* FIXME */
@@ -4288,7 +4288,7 @@ static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
{
struct b43_phy_n *nphy = dev->phy.n;
- enum ieee80211_band band;
+ enum nl80211_band band;
u16 tmp;
if (!enable) {
@@ -4300,12 +4300,12 @@ static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
if (dev->phy.rev >= 7) {
tmp = 0x1480;
} else if (dev->phy.rev >= 3) {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
tmp = 0x600;
else
tmp = 0x480;
} else {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
tmp = 0x180;
else
tmp = 0x120;
@@ -4734,7 +4734,7 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
u16 *rssical_radio_regs = NULL;
u16 *rssical_phy_regs = NULL;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (!nphy->rssical_chanspec_2G.center_freq)
return;
rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
@@ -4804,7 +4804,7 @@ static void b43_nphy_tx_cal_radio_setup_rev7(struct b43_wldev *dev)
save[off + 7] = b43_radio_read(dev, r + R2057_TX0_TSSIG);
save[off + 8] = b43_radio_read(dev, r + R2057_TX0_TSSI_MISC1);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_radio_write(dev, r + R2057_TX0_TX_SSI_MASTER, 0xA);
b43_radio_write(dev, r + R2057_TX0_IQCAL_VCM_HG, 0x43);
b43_radio_write(dev, r + R2057_TX0_IQCAL_IDAC, 0x55);
@@ -4864,7 +4864,7 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
save[offset + 9] = b43_radio_read(dev, B2055_XOMISC);
save[offset + 10] = b43_radio_read(dev, B2055_PLL_LFC1);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
b43_radio_write(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
b43_radio_write(dev, tmp | B2055_CAL_LPOCTL, 0x40);
b43_radio_write(dev, tmp | B2055_CAL_TS, 0x55);
@@ -5005,7 +5005,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
tbl_tx_filter_coef_rev4[3]);
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
b43_nphy_pa_set_tx_dig_filter(dev, 0x186,
tbl_tx_filter_coef_rev4[5]);
if (dev->phy.channel == 14)
@@ -5185,7 +5185,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
false, 0);
} else if (phy->rev == 7) {
b43_radio_maskset(dev, R2057_OVR_REG0, 1 << 4, 1 << 4);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE0, ~1, 0);
b43_radio_maskset(dev, R2057_PAD2G_TUNE_PUS_CORE1, ~1, 0);
} else {
@@ -5210,7 +5210,7 @@ static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
tmp = 0x0180;
else
tmp = 0x0120;
@@ -5233,7 +5233,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
if (nphy->hang_avoid)
b43_nphy_stay_in_carrier_search(dev, 1);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
iqcal_chanspec = &nphy->iqcal_chanspec_2G;
@@ -5304,7 +5304,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
u16 *txcal_radio_regs = NULL;
struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (!nphy->iqcal_chanspec_2G.center_freq)
return;
table = nphy->cal_cache.txcal_coeffs_2G;
@@ -5332,7 +5332,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
if (dev->phy.rev < 2)
b43_nphy_tx_iq_workaround(dev);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
} else {
@@ -5422,7 +5422,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
phy6or5x = dev->phy.rev >= 6 ||
(dev->phy.rev == 5 && nphy->ipa2g_on &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
+ b43_current_band(dev->wl) == NL80211_BAND_2GHZ);
if (phy6or5x) {
if (b43_is_40mhz(dev)) {
b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
@@ -5657,7 +5657,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
u16 tmp[6];
u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna;
u32 real, imag;
- enum ieee80211_band band;
+ enum nl80211_band band;
u8 use;
u16 cur_hpf;
@@ -5712,18 +5712,18 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
band = b43_current_band(dev->wl);
if (nphy->rxcalparams & 0xFF000000) {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
b43_phy_write(dev, rfctl[0], 0x140);
else
b43_phy_write(dev, rfctl[0], 0x110);
} else {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
b43_phy_write(dev, rfctl[0], 0x180);
else
b43_phy_write(dev, rfctl[0], 0x120);
}
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
b43_phy_write(dev, rfctl[1], 0x148);
else
b43_phy_write(dev, rfctl[1], 0x114);
@@ -5919,7 +5919,7 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
#if 0
/* Some extra gains */
hw_gain = 6; /* N-PHY specific */
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
hw_gain += sprom->antenna_gain.a0;
else
hw_gain += sprom->antenna_gain.a1;
@@ -6043,7 +6043,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
u8 tx_pwr_state;
struct nphy_txgains target;
u16 tmp;
- enum ieee80211_band tmp2;
+ enum nl80211_band tmp2;
bool do_rssi_cal;
u16 clip[2];
@@ -6051,7 +6051,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
if ((dev->phy.rev >= 3) &&
(sprom->boardflags_lo & B43_BFL_EXTLNA) &&
- (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
+ (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)) {
switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
case B43_BUS_BCMA:
@@ -6170,7 +6170,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
b43_nphy_classifier(dev, 0, 0);
b43_nphy_read_clip_detection(dev, clip);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
b43_nphy_bphy_init(dev);
tx_pwr_state = nphy->txpwrctrl;
@@ -6187,7 +6187,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
do_rssi_cal = false;
if (phy->rev >= 3) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
do_rssi_cal = !nphy->rssical_chanspec_2G.center_freq;
else
do_rssi_cal = !nphy->rssical_chanspec_5G.center_freq;
@@ -6201,7 +6201,7 @@ static int b43_phy_initn(struct b43_wldev *dev)
}
if (!((nphy->measure_hold & 0x6) != 0)) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
do_cal = !nphy->iqcal_chanspec_2G.center_freq;
else
do_cal = !nphy->iqcal_chanspec_5G.center_freq;
@@ -6291,7 +6291,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
int ch = new_channel->hw_value;
u16 tmp16;
- if (new_channel->band == IEEE80211_BAND_5GHZ) {
+ if (new_channel->band == NL80211_BAND_5GHZ) {
/* Switch to 2 GHz for a moment to access B43_PHY_B_BBCFG */
b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
@@ -6302,7 +6302,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
B43_PHY_B_BBCFG_RSTCCA | B43_PHY_B_BBCFG_RSTRX);
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16);
b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
- } else if (new_channel->band == IEEE80211_BAND_2GHZ) {
+ } else if (new_channel->band == NL80211_BAND_2GHZ) {
b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
tmp16 = b43_read16(dev, B43_MMIO_PSM_PHY_HDR);
b43_write16(dev, B43_MMIO_PSM_PHY_HDR, tmp16 | 4);
@@ -6319,7 +6319,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
} else {
b43_nphy_classifier(dev, 2, 2);
- if (new_channel->band == IEEE80211_BAND_2GHZ)
+ if (new_channel->band == NL80211_BAND_2GHZ)
b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
}
@@ -6449,7 +6449,7 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
&(tabent_r7->phy_regs) : &(tabent_r7_2g->phy_regs);
if (phy->radio_rev <= 4 || phy->radio_rev == 6) {
- tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 2 : 0;
+ tmp = (channel->band == NL80211_BAND_5GHZ) ? 2 : 0;
b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE0, ~2, tmp);
b43_radio_maskset(dev, R2057_TIA_CONFIG_CORE1, ~2, tmp);
}
@@ -6457,12 +6457,12 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
b43_radio_2057_setup(dev, tabent_r7, tabent_r7_2g);
b43_nphy_channel_setup(dev, phy_regs, channel);
} else if (phy->rev >= 3) {
- tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0;
+ tmp = (channel->band == NL80211_BAND_5GHZ) ? 4 : 0;
b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
b43_radio_2056_setup(dev, tabent_r3);
b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel);
} else {
- tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050;
+ tmp = (channel->band == NL80211_BAND_5GHZ) ? 0x0020 : 0x0050;
b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
b43_radio_2055_setup(dev, tabent_r2);
b43_nphy_channel_setup(dev, &(tabent_r2->phy_regs), channel);
@@ -6692,7 +6692,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
enum nl80211_channel_type channel_type =
cfg80211_get_chandef_type(&dev->wl->hw->conf.chandef);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if ((new_channel < 1) || (new_channel > 14))
return -EINVAL;
} else {
@@ -6705,7 +6705,7 @@ static int b43_nphy_op_switch_channel(struct b43_wldev *dev,
static unsigned int b43_nphy_op_get_default_chan(struct b43_wldev *dev)
{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
return 1;
return 36;
}
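
[annotator's note] b43_nphy_ipa() near the top of this file is the predicate most of the N-PHY hunks feed: an internal-PA flag per band, gated by the currently active band. A stand-alone sketch (field names simplified from struct b43_phy_n):

#include <stdbool.h>

enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ };

struct nphy_state {
	bool ipa2g_on;	/* internal PA enabled for 2.4 GHz */
	bool ipa5g_on;	/* internal PA enabled for 5 GHz */
};

/* True when the internal PA is in use on the *current* band. */
static bool nphy_ipa(const struct nphy_state *n, enum nl80211_band band)
{
	return (n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
	       (n->ipa5g_on && band == NL80211_BAND_5GHZ);
}

int main(void)
{
	struct nphy_state n = { .ipa2g_on = true, .ipa5g_on = false };
	return nphy_ipa(&n, NL80211_BAND_2GHZ) &&
	       !nphy_ipa(&n, NL80211_BAND_5GHZ) ? 0 : 1;
}
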
diff --git a/drivers/net/wireless/broadcom/b43/tables_lpphy.c b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
index cff187c56..ce01e1645 100644
--- a/drivers/net/wireless/broadcom/b43/tables_lpphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_lpphy.c
@@ -560,7 +560,7 @@ void b2062_upload_init_table(struct b43_wldev *dev)
for (i = 0; i < ARRAY_SIZE(b2062_init_tab); i++) {
e = &b2062_init_tab[i];
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (!(e->flags & B206X_FLAG_G))
continue;
b43_radio_write(dev, e->offset, e->value_g);
@@ -579,7 +579,7 @@ void b2063_upload_init_table(struct b43_wldev *dev)
for (i = 0; i < ARRAY_SIZE(b2063_init_tab); i++) {
e = &b2063_init_tab[i];
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (!(e->flags & B206X_FLAG_G))
continue;
b43_radio_write(dev, e->offset, e->value_g);
@@ -2379,12 +2379,12 @@ static void lpphy_rev2plus_write_gain_table(struct b43_wldev *dev, int offset,
tmp |= data.pga << 8;
tmp |= data.gm;
if (dev->phy.rev >= 3) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
tmp |= 0x10 << 24;
else
tmp |= 0x70 << 24;
} else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
tmp |= 0x14 << 24;
else
tmp |= 0x7F << 24;
@@ -2423,7 +2423,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
(sprom->boardflags_lo & B43_BFL_HGPA))
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev0_nopa_tx_gain_table);
- else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev0_2ghz_tx_gain_table);
else
@@ -2435,7 +2435,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
(sprom->boardflags_lo & B43_BFL_HGPA))
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev1_nopa_tx_gain_table);
- else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev1_2ghz_tx_gain_table);
else
@@ -2446,7 +2446,7 @@ void lpphy_init_tx_gain_table(struct b43_wldev *dev)
if (sprom->boardflags_hi & B43_BFH_NOPA)
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev2_nopa_tx_gain_table);
- else if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ else if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ)
lpphy_write_gain_table_bulk(dev, 0, 128,
lpphy_rev2_2ghz_tx_gain_table);
else
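
[annotator's note] The lpphy_rev2plus_write_gain_table() hunk above shows how one 32-bit gain-table word is assembled, with a band-dependent value in the top byte. A stand-alone reduction of that packing (the pad field's placement at bit 16 is an assumption; only gm, pga, and the top byte are visible in this hunk):

#include <stdio.h>

enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ };

struct lp_gain { unsigned int gm, pga, pad; };

static unsigned int pack_gain_word(struct lp_gain g, int phy_rev,
				   enum nl80211_band band)
{
	unsigned int tmp = 0;

	tmp |= g.pad << 16;	/* assumed position */
	tmp |= g.pga << 8;
	tmp |= g.gm;
	/* Band-dependent top byte, per the hunk above. */
	if (phy_rev >= 3)
		tmp |= (band == NL80211_BAND_5GHZ ? 0x10u : 0x70u) << 24;
	else
		tmp |= (band == NL80211_BAND_5GHZ ? 0x14u : 0x7Fu) << 24;
	return tmp;
}

int main(void)
{
	struct lp_gain g = { .gm = 4, .pga = 12, .pad = 12 };
	printf("0x%08x\n", pack_gain_word(g, 3, NL80211_BAND_2GHZ));
	return 0;
}
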
diff --git a/drivers/net/wireless/broadcom/b43/tables_nphy.c b/drivers/net/wireless/broadcom/b43/tables_nphy.c
index b2f0d245b..44e0957a7 100644
--- a/drivers/net/wireless/broadcom/b43/tables_nphy.c
+++ b/drivers/net/wireless/broadcom/b43/tables_nphy.c
@@ -3502,7 +3502,7 @@ static void b43_nphy_tables_init_rev7_volatile(struct b43_wldev *dev)
{ 0x2, 0x18, 0x2 }, /* Core 1 */
};
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
antswlut = sprom->fem.ghz5.antswlut;
else
antswlut = sprom->fem.ghz2.antswlut;
@@ -3566,7 +3566,7 @@ static void b43_nphy_tables_init_rev3(struct b43_wldev *dev)
struct ssb_sprom *sprom = dev->dev->bus_sprom;
u8 antswlut;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ)
antswlut = sprom->fem.ghz5.antswlut;
else
antswlut = sprom->fem.ghz2.antswlut;
@@ -3651,7 +3651,7 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
switch (phy->rev) {
case 17:
if (phy->radio_rev == 14)
@@ -3698,17 +3698,17 @@ static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
- enum ieee80211_band band = b43_current_band(dev->wl);
+ enum nl80211_band band = b43_current_band(dev->wl);
struct ssb_sprom *sprom = dev->dev->bus_sprom;
if (dev->phy.rev < 3)
return b43_ntab_tx_gain_rev0_1_2;
/* rev 3+ */
- if ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
- (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)) {
+ if ((dev->phy.n->ipa2g_on && band == NL80211_BAND_2GHZ) ||
+ (dev->phy.n->ipa5g_on && band == NL80211_BAND_5GHZ)) {
return b43_nphy_get_ipa_gain_table(dev);
- } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ } else if (b43_current_band(dev->wl) == NL80211_BAND_5GHZ) {
switch (phy->rev) {
case 6:
case 5:
@@ -3746,7 +3746,7 @@ const s16 *b43_ntab_get_rf_pwr_offset_table(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
switch (phy->rev) {
case 17:
if (phy->radio_rev == 14)
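
[annotator's note] b43_nphy_get_tx_gain_table() above keys its lookup off the band: internal-PA tables win when the iPA is on for the current band, otherwise a 5 GHz or 2.4 GHz table is chosen (the per-revision detail inside the 5 GHz branch is elided here). A reduced stand-alone selector with placeholder tables:

#include <stdbool.h>

enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ };

static const unsigned int tab_ipa[1], tab_5g[1], tab_2g[1], tab_legacy[1];

static const unsigned int *get_tx_gain_table(int phy_rev, bool ipa2g_on,
					     bool ipa5g_on,
					     enum nl80211_band band)
{
	if (phy_rev < 3)
		return tab_legacy;		/* one shared table */
	if ((ipa2g_on && band == NL80211_BAND_2GHZ) ||
	    (ipa5g_on && band == NL80211_BAND_5GHZ))
		return tab_ipa;			/* internal-PA variant */
	return band == NL80211_BAND_5GHZ ? tab_5g : tab_2g;
}

int main(void)
{
	return get_tx_gain_table(5, false, false, NL80211_BAND_5GHZ)
	       == tab_5g ? 0 : 1;
}
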
diff --git a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
index e347b8d80..704ef1bcb 100644
--- a/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
+++ b/drivers/net/wireless/broadcom/b43/tables_phy_lcn.c
@@ -701,7 +701,7 @@ void b43_phy_lcn_tables_init(struct b43_wldev *dev)
b43_phy_lcn_upload_static_tables(dev);
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (b43_current_band(dev->wl) == NL80211_BAND_2GHZ) {
if (sprom->boardflags_lo & B43_BFL_FEM)
b43_phy_lcn_load_tx_gain_tab(dev,
b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0);
diff --git a/drivers/net/wireless/broadcom/b43/xmit.c b/drivers/net/wireless/broadcom/b43/xmit.c
index 426dc13c4..f6201264d 100644
--- a/drivers/net/wireless/broadcom/b43/xmit.c
+++ b/drivers/net/wireless/broadcom/b43/xmit.c
@@ -803,7 +803,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
chanid = (chanstat & B43_RX_CHAN_ID) >> B43_RX_CHAN_ID_SHIFT;
switch (chanstat & B43_RX_CHAN_PHYTYPE) {
case B43_PHYTYPE_A:
- status.band = IEEE80211_BAND_5GHZ;
+ status.band = NL80211_BAND_5GHZ;
B43_WARN_ON(1);
/* FIXME: We don't really know which value the "chanid" contains.
* So the following assignment might be wrong. */
@@ -811,7 +811,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
ieee80211_channel_to_frequency(chanid, status.band);
break;
case B43_PHYTYPE_G:
- status.band = IEEE80211_BAND_2GHZ;
+ status.band = NL80211_BAND_2GHZ;
/* Somewhere between 478.104 and 508.1084 firmware for G-PHY
* has been modified to be compatible with N-PHY and others.
*/
@@ -826,9 +826,9 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
/* chanid is the SHM channel cookie. Which is the plain
* channel number in b43. */
if (chanstat & B43_RX_CHAN_5GHZ)
- status.band = IEEE80211_BAND_5GHZ;
+ status.band = NL80211_BAND_5GHZ;
else
- status.band = IEEE80211_BAND_2GHZ;
+ status.band = NL80211_BAND_2GHZ;
status.freq =
ieee80211_channel_to_frequency(chanid, status.band);
break;
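
[annotator's note] The RX path above stores the band in status.band, then derives the frequency with ieee80211_channel_to_frequency(). A stand-alone sketch of the arithmetic that helper performs for the two bands b43 cares about (channel 14's off-grid 2484 MHz case included; other bands omitted):

enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ };

/* Center frequency in MHz for a channel number. */
static int chan_to_freq(int chan, enum nl80211_band band)
{
	if (band == NL80211_BAND_2GHZ) {
		if (chan == 14)
			return 2484;	/* Japan-only channel, off-grid */
		return 2407 + chan * 5;	/* channels 1..13 */
	}
	return 5000 + chan * 5;		/* 5 GHz grid */
}

int main(void)
{
	return chan_to_freq(1, NL80211_BAND_2GHZ) == 2412 &&
	       chan_to_freq(36, NL80211_BAND_5GHZ) == 5180 ? 0 : 1;
}
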
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 8491b6f4d..17aa4fd21 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -1055,7 +1055,7 @@ static void b43legacy_write_probe_resp_plcp(struct b43legacy_wldev *dev,
b43legacy_generate_plcp_hdr(&plcp, size + FCS_LEN, rate->hw_value);
dur = ieee80211_generic_frame_duration(dev->wl->hw,
dev->wl->vif,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
size,
rate);
/* Write PLCP in two parts and timing for packet transfer */
@@ -1121,7 +1121,7 @@ static const u8 *b43legacy_generate_probe_resp(struct b43legacy_wldev *dev,
IEEE80211_STYPE_PROBE_RESP);
dur = ieee80211_generic_frame_duration(dev->wl->hw,
dev->wl->vif,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
*dest_size,
rate);
hdr->duration_id = dur;
@@ -2716,7 +2716,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
/* Switch the PHY mode (if necessary). */
switch (conf->chandef.chan->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
if (phy->type == B43legacy_PHYTYPE_B)
new_phymode = B43legacy_PHYMODE_B;
else
@@ -2789,7 +2789,7 @@ out_unlock_mutex:
static void b43legacy_update_basic_rates(struct b43legacy_wldev *dev, u32 brates)
{
struct ieee80211_supported_band *sband =
- dev->wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ dev->wl->hw->wiphy->bands[NL80211_BAND_2GHZ];
struct ieee80211_rate *rate;
int i;
u16 basic, direct, offset, basic_offset, rateptr;
@@ -3627,13 +3627,13 @@ static int b43legacy_setup_modes(struct b43legacy_wldev *dev,
phy->possible_phymodes = 0;
if (have_bphy) {
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
&b43legacy_band_2GHz_BPHY;
phy->possible_phymodes |= B43legacy_PHYMODE_B;
}
if (have_gphy) {
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
&b43legacy_band_2GHz_GPHY;
phy->possible_phymodes |= B43legacy_PHYMODE_G;
}
diff --git a/drivers/net/wireless/broadcom/b43legacy/xmit.c b/drivers/net/wireless/broadcom/b43legacy/xmit.c
index 34bf3f0b7..35ccf400b 100644
--- a/drivers/net/wireless/broadcom/b43legacy/xmit.c
+++ b/drivers/net/wireless/broadcom/b43legacy/xmit.c
@@ -565,7 +565,7 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
switch (chanstat & B43legacy_RX_CHAN_PHYTYPE) {
case B43legacy_PHYTYPE_B:
case B43legacy_PHYTYPE_G:
- status.band = IEEE80211_BAND_2GHZ;
+ status.band = NL80211_BAND_2GHZ;
status.freq = chanid + 2400;
break;
default:
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
index 6af658e44..d1bc51f92 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c
@@ -321,7 +321,8 @@ brcmf_proto_bcdc_hdrpull(struct brcmf_pub *drvr, bool do_fws,
if (pktbuf->len == 0)
return -ENODATA;
- *ifp = tmp_if;
+ if (ifp != NULL)
+ *ifp = tmp_if;
return 0;
}
@@ -351,6 +352,12 @@ brcmf_proto_bcdc_add_tdls_peer(struct brcmf_pub *drvr, int ifidx,
{
}
+static void brcmf_proto_bcdc_rxreorder(struct brcmf_if *ifp,
+ struct sk_buff *skb)
+{
+ brcmf_fws_rxreorder(ifp, skb);
+}
+
int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
{
struct brcmf_bcdc *bcdc;
@@ -372,6 +379,7 @@ int brcmf_proto_bcdc_attach(struct brcmf_pub *drvr)
drvr->proto->configure_addr_mode = brcmf_proto_bcdc_configure_addr_mode;
drvr->proto->delete_peer = brcmf_proto_bcdc_delete_peer;
drvr->proto->add_tdls_peer = brcmf_proto_bcdc_add_tdls_peer;
+ drvr->proto->rxreorder = brcmf_proto_bcdc_rxreorder;
drvr->proto->pd = bcdc;
drvr->hdrlen += BCDC_HEADER_LEN + BRCMF_PROT_FW_SIGNAL_MAX_TXBYTES;
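
[annotator's note] Two separable changes land in bcdc.c above: hdrpull() stops storing through a possibly-NULL out-parameter, and the protocol vtable gains an rxreorder hook that bcdc forwards to the firmware-signalling code. A stand-alone sketch of both patterns (struct and function names simplified):

#include <stddef.h>
#include <stdio.h>

struct sk_buff;			/* opaque here */
struct brcmf_if { int idx; };

struct proto_ops {
	/* new hook: protocol-specific RX reordering */
	void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
};

static void bcdc_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
{
	(void)skb;
	printf("reorder on if%d\n", ifp->idx);	/* stands in for fws code */
}

/* The out-parameter is optional: callers that only validate the header
 * pass NULL, so guard the store instead of dereferencing blindly. */
static int hdrpull(struct brcmf_if *found, struct brcmf_if **ifp)
{
	if (ifp != NULL)
		*ifp = found;
	return 0;
}

int main(void)
{
	struct brcmf_if intf = { .idx = 0 };
	struct proto_ops ops = { .rxreorder = bcdc_rxreorder };

	hdrpull(&intf, NULL);		/* safe after the fix */
	ops.rxreorder(&intf, NULL);
	return 0;
}
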
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index da0cdd313..c7550dab6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -250,7 +250,7 @@ static int brcmf_sdiod_request_data(struct brcmf_sdio_dev *sdiodev, u8 fn,
u32 addr, u8 regsz, void *data, bool write)
{
struct sdio_func *func;
- int ret;
+ int ret = -EINVAL;
brcmf_dbg(SDIO, "rw=%d, func=%d, addr=0x%05x, nbytes=%d\n",
write, fn, addr, regsz);
@@ -1098,6 +1098,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = {
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4345),
BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354),
+ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356),
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
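
[annotator's note] The bcmsdh.c hunk initializes ret so that a register width with no matching transfer path fails with -EINVAL instead of returning an uninitialized value. The shape of the fix, stand-alone:

#include <errno.h>

/* Dispatch on register width; unknown widths fall out with the
 * pre-set error instead of leaving ret uninitialized. */
static int request_data(unsigned int regsz)
{
	int ret = -EINVAL;

	switch (regsz) {
	case 1:
	case 2:
	case 4:
		ret = 0;	/* would issue the 1/2/4-byte transfer */
		break;
	default:
		break;		/* ret stays -EINVAL */
	}
	return ret;
}

int main(void)
{
	return request_data(3) == -EINVAL ? 0 : 1;
}
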
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
index 8e02a478e..2b2465456 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
@@ -216,7 +216,9 @@ bool brcmf_c_prec_enq(struct device *dev, struct pktq *q, struct sk_buff *pkt,
int prec);
/* Receive frame for delivery to OS. Callee disposes of rxp. */
-void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp);
+void brcmf_rx_frame(struct device *dev, struct sk_buff *rxp, bool handle_event);
+/* Receive async event packet from firmware. Callee disposes of rxp. */
+void brcmf_rx_event(struct device *dev, struct sk_buff *rxp);
/* Indication from bus module regarding presence/insertion of dongle. */
int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings);
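
[annotator's note] bus.h now exposes two RX entry points: brcmf_rx_frame() gains a handle_event flag, and async firmware events get a dedicated brcmf_rx_event(). A sketch of the dispatch shape a bus driver sees; the semantics described in the comments are my reading of the header comments above, and the real callees live elsewhere in the driver:

#include <stdbool.h>
#include <stdio.h>

struct sk_buff { int is_event; };

static void rx_frame(struct sk_buff *skb, bool handle_event)
{
	/* Data path; optionally also scans for events carried on data. */
	(void)skb;
	printf("frame (events %s)\n", handle_event ? "checked" : "skipped");
}

static void rx_event(struct sk_buff *skb)
{
	/* Control path: firmware event, never delivered as a data frame. */
	(void)skb;
	printf("event\n");
}

int main(void)
{
	struct sk_buff data = { 0 }, evt = { 1 };

	rx_frame(&data, false);	/* bus knows this channel carries no events */
	rx_event(&evt);		/* dedicated event channel */
	return 0;
}
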
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index d5c2a2757..62f475e31 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -144,7 +144,7 @@ static struct ieee80211_rate __wl_rates[] = {
#define wl_a_rates_size (wl_g_rates_size - 4)
#define CHAN2G(_channel, _freq) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = IEEE80211_CHAN_DISABLED, \
@@ -153,7 +153,7 @@ static struct ieee80211_rate __wl_rates[] = {
}
#define CHAN5G(_channel) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = IEEE80211_CHAN_DISABLED, \
@@ -181,13 +181,13 @@ static struct ieee80211_channel __wl_5ghz_channels[] = {
* above is added to the band during setup.
*/
static const struct ieee80211_supported_band __wl_band_2ghz = {
- .band = IEEE80211_BAND_2GHZ,
+ .band = NL80211_BAND_2GHZ,
.bitrates = wl_g_rates,
.n_bitrates = wl_g_rates_size,
};
static const struct ieee80211_supported_band __wl_band_5ghz = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.bitrates = wl_a_rates,
.n_bitrates = wl_a_rates_size,
};
@@ -250,6 +250,20 @@ struct parsed_vndr_ies {
struct parsed_vndr_ie_info ie_info[VNDR_IE_PARSE_LIMIT];
};
+static u8 nl80211_band_to_fwil(enum nl80211_band band)
+{
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ return WLC_BAND_2G;
+ case NL80211_BAND_5GHZ:
+ return WLC_BAND_5G;
+ default:
+ WARN_ON(1);
+ break;
+ }
+ return 0;
+}
+
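
[annotator's note] nl80211_band_to_fwil(), added just above, translates the cfg80211 enum into the firmware's WLC_BAND_* encoding. A compilable stand-alone version with stubbed firmware constants (the WLC_BAND_* values here are assumptions; the real ones come from the firmware interface headers):

enum nl80211_band { NL80211_BAND_2GHZ, NL80211_BAND_5GHZ };

/* assumed firmware-side encoding */
#define WLC_BAND_AUTO 0
#define WLC_BAND_5G   1
#define WLC_BAND_2G   2

static unsigned char band_to_fwil(enum nl80211_band band)
{
	switch (band) {
	case NL80211_BAND_2GHZ: return WLC_BAND_2G;
	case NL80211_BAND_5GHZ: return WLC_BAND_5G;
	default:                return WLC_BAND_AUTO; /* WARNs in-kernel */
	}
}

int main(void)
{
	return band_to_fwil(NL80211_BAND_5GHZ) == WLC_BAND_5G ? 0 : 1;
}
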
static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
struct cfg80211_chan_def *ch)
{
@@ -292,13 +306,13 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
WARN_ON_ONCE(1);
}
switch (ch->chan->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
ch_inf.band = BRCMU_CHAN_BAND_2G;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
ch_inf.band = BRCMU_CHAN_BAND_5G;
break;
- case IEEE80211_BAND_60GHZ:
+ case NL80211_BAND_60GHZ:
default:
WARN_ON_ONCE(1);
}
@@ -1796,6 +1810,50 @@ enum nl80211_auth_type brcmf_war_auth_type(struct brcmf_if *ifp,
return type;
}
+static void brcmf_set_join_pref(struct brcmf_if *ifp,
+ struct cfg80211_bss_selection *bss_select)
+{
+ struct brcmf_join_pref_params join_pref_params[2];
+ enum nl80211_band band;
+ int err, i = 0;
+
+ join_pref_params[i].len = 2;
+ join_pref_params[i].rssi_gain = 0;
+
+ if (bss_select->behaviour != NL80211_BSS_SELECT_ATTR_BAND_PREF)
+ brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_ASSOC_PREFER, WLC_BAND_AUTO);
+
+ switch (bss_select->behaviour) {
+ case __NL80211_BSS_SELECT_ATTR_INVALID:
+ brcmf_c_set_joinpref_default(ifp);
+ return;
+ case NL80211_BSS_SELECT_ATTR_BAND_PREF:
+ join_pref_params[i].type = BRCMF_JOIN_PREF_BAND;
+ band = bss_select->param.band_pref;
+ join_pref_params[i].band = nl80211_band_to_fwil(band);
+ i++;
+ break;
+ case NL80211_BSS_SELECT_ATTR_RSSI_ADJUST:
+ join_pref_params[i].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+ band = bss_select->param.adjust.band;
+ join_pref_params[i].band = nl80211_band_to_fwil(band);
+ join_pref_params[i].rssi_gain = bss_select->param.adjust.delta;
+ i++;
+ break;
+ case NL80211_BSS_SELECT_ATTR_RSSI:
+ default:
+ break;
+ }
+ join_pref_params[i].type = BRCMF_JOIN_PREF_RSSI;
+ join_pref_params[i].len = 2;
+ join_pref_params[i].rssi_gain = 0;
+ join_pref_params[i].band = 0;
+ err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+ sizeof(join_pref_params));
+ if (err)
+ brcmf_err("Set join_pref error (%d)\n", err);
+}
+
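
[annotator's note] brcmf_set_join_pref(), added above, builds a short tuple list for the firmware's "join_pref" iovar: an optional band or RSSI-delta entry first, then an unconditional RSSI entry as the tie-breaker. A sketch of the record layout and the band-preference shape of the list; the 4-byte field order and the type ids are assumptions, not the driver's definitions:

#include <stdint.h>
#include <string.h>

/* one 4-byte preference record, as consumed by "join_pref" (order assumed) */
struct join_pref_params {
	uint8_t type;		/* BAND / RSSI_DELTA / RSSI */
	uint8_t len;		/* payload length, always 2 here */
	uint8_t rssi_gain;	/* dB boost for RSSI_DELTA, else 0 */
	uint8_t band;		/* firmware band id for BAND/RSSI_DELTA */
};

enum { PREF_RSSI = 0, PREF_BAND = 1, PREF_RSSI_DELTA = 2 }; /* placeholders */

/* Build "prefer <band>, then strongest RSSI" into out[0..1]. */
static void build_band_pref(struct join_pref_params out[2], uint8_t fwil_band)
{
	memset(out, 0, 2 * sizeof(*out));
	out[0] = (struct join_pref_params){ PREF_BAND, 2, 0, fwil_band };
	out[1] = (struct join_pref_params){ PREF_RSSI, 2, 0, 0 };
}

int main(void)
{
	struct join_pref_params p[2];

	build_band_pref(p, 1 /* assumed 5 GHz id */);
	return p[1].type == PREF_RSSI ? 0 : 1;
}
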
static s32
brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_connect_params *sme)
@@ -1952,6 +2010,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ext_join_params->scan_le.nprobes = cpu_to_le32(-1);
}
+ brcmf_set_join_pref(ifp, &sme->bss_select);
+
err = brcmf_fil_bsscfg_data_set(ifp, "join", ext_join_params,
join_params_size);
kfree(ext_join_params);
@@ -2480,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac, struct station_info *sinfo)
{
struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_scb_val_le scb_val;
s32 err = 0;
struct brcmf_sta_info_le sta_info_le;
u32 sta_flags;
u32 is_tdls_peer;
s32 total_rssi;
s32 count_rssi;
+ int rssi;
u32 i;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
@@ -2569,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
total_rssi /= count_rssi;
sinfo->signal = total_rssi;
+ } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED,
+ &ifp->vif->sme_state)) {
+ memset(&scb_val, 0, sizeof(scb_val));
+ err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI,
+ &scb_val, sizeof(scb_val));
+ if (err) {
+ brcmf_err("Could not get rssi (%d)\n", err);
+ goto done;
+ } else {
+ rssi = le32_to_cpu(scb_val.val);
+ sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
+ sinfo->signal = rssi;
+ brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
+ }
}
}
done:
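
[annotator's note] The get_station() hunk adds a fallback: when no per-chain RSSI samples were reported but the interface is associated, the driver queries the firmware's aggregate RSSI (BRCMF_C_GET_RSSI) instead of leaving the signal field unset. The control flow, reduced to a stand-alone sketch:

#include <stdbool.h>

/* Stand-in for the firmware query; returns 0 and writes dBm on success. */
static int fw_get_rssi(int *dbm) { *dbm = -55; return 0; }

/* Fill *signal_dbm: prefer averaged per-chain samples, else ask firmware. */
static bool fill_signal(int count_rssi, int total_rssi, bool connected,
			int *signal_dbm)
{
	if (count_rssi) {
		*signal_dbm = total_rssi / count_rssi;
		return true;
	}
	if (connected && fw_get_rssi(signal_dbm) == 0)
		return true;
	return false;	/* NL80211_STA_INFO_SIGNAL left unfilled */
}

int main(void)
{
	int s;

	return fill_signal(0, 0, true, &s) && s == -55 ? 0 : 1;
}
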
@@ -2679,9 +2755,9 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_info *cfg,
channel = bi->ctl_ch;
if (channel <= CH_MAX_2G_CHANNEL)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = wiphy->bands[NL80211_BAND_2GHZ];
else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = wiphy->bands[NL80211_BAND_5GHZ];
freq = ieee80211_channel_to_frequency(channel, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -2788,9 +2864,9 @@ static s32 brcmf_inform_ibss(struct brcmf_cfg80211_info *cfg,
cfg->d11inf.decchspec(&ch);
if (ch.band == BRCMU_CHAN_BAND_2G)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = wiphy->bands[NL80211_BAND_2GHZ];
else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = wiphy->bands[NL80211_BAND_5GHZ];
freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
cfg->channel = freq;
@@ -3608,7 +3684,8 @@ static void brcmf_configure_wowl(struct brcmf_cfg80211_info *cfg,
if (!test_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state))
wowl_config |= BRCMF_WOWL_UNASSOC;
- brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear", strlen("clear"));
+ brcmf_fil_iovar_data_set(ifp, "wowl_wakeind", "clear",
+ sizeof(struct brcmf_wowl_wakeind_le));
brcmf_fil_iovar_int_set(ifp, "wowl", wowl_config);
brcmf_fil_iovar_int_set(ifp, "wowl_activate", 1);
brcmf_bus_wowl_config(cfg->pub->bus_if, true);
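
[annotator's note] The wowl hunk fixes the length passed with the "wowl_wakeind" clear command: the firmware expects a buffer the size of its wakeind structure, not the 5 bytes of the string "clear". A sketch of the mismatch; the two-u32 layout below is an assumption about struct brcmf_wowl_wakeind_le:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* assumed firmware wakeind layout: two little-endian 32-bit words */
struct wowl_wakeind_le {
	uint32_t pci_wakeind;
	uint32_t ucode_wakeind;
};

int main(void)
{
	/* The old call sent strlen("clear") == 5 bytes, while the firmware
	 * reads sizeof(struct wowl_wakeind_le); the tail was garbage. */
	printf("sent %zu, firmware expects %zu\n",
	       strlen("clear"), sizeof(struct wowl_wakeind_le));
	return 0;
}
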
@@ -5215,9 +5292,9 @@ brcmf_bss_roaming_done(struct brcmf_cfg80211_info *cfg,
cfg->d11inf.decchspec(&ch);
if (ch.band == BRCMU_CHAN_BAND_2G)
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = wiphy->bands[NL80211_BAND_2GHZ];
else
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = wiphy->bands[NL80211_BAND_5GHZ];
freq = ieee80211_channel_to_frequency(ch.chnum, band->band);
notify_channel = ieee80211_get_channel(wiphy, freq);
@@ -5707,11 +5784,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
}
wiphy = cfg_to_wiphy(cfg);
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = wiphy->bands[NL80211_BAND_2GHZ];
if (band)
for (i = 0; i < band->n_channels; i++)
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = wiphy->bands[NL80211_BAND_5GHZ];
if (band)
for (i = 0; i < band->n_channels; i++)
band->channels[i].flags = IEEE80211_CHAN_DISABLED;
@@ -5722,9 +5799,9 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
cfg->d11inf.decchspec(&ch);
if (ch.band == BRCMU_CHAN_BAND_2G) {
- band = wiphy->bands[IEEE80211_BAND_2GHZ];
+ band = wiphy->bands[NL80211_BAND_2GHZ];
} else if (ch.band == BRCMU_CHAN_BAND_5G) {
- band = wiphy->bands[IEEE80211_BAND_5GHZ];
+ band = wiphy->bands[NL80211_BAND_5GHZ];
} else {
brcmf_err("Invalid channel Spec. 0x%x.\n", ch.chspec);
continue;
@@ -5839,7 +5916,7 @@ static int brcmf_enable_bw40_2g(struct brcmf_cfg80211_info *cfg)
return err;
}
- band = cfg_to_wiphy(cfg)->bands[IEEE80211_BAND_2GHZ];
+ band = cfg_to_wiphy(cfg)->bands[NL80211_BAND_2GHZ];
list = (struct brcmf_chanspec_list *)pbuf;
num_chan = le32_to_cpu(list->count);
for (i = 0; i < num_chan; i++) {
@@ -5871,11 +5948,11 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
band = WLC_BAND_2G;
err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
if (!err) {
- bw_cap[IEEE80211_BAND_2GHZ] = band;
+ bw_cap[NL80211_BAND_2GHZ] = band;
band = WLC_BAND_5G;
err = brcmf_fil_iovar_int_get(ifp, "bw_cap", &band);
if (!err) {
- bw_cap[IEEE80211_BAND_5GHZ] = band;
+ bw_cap[NL80211_BAND_5GHZ] = band;
return;
}
WARN_ON(1);
@@ -5890,14 +5967,14 @@ static void brcmf_get_bwcap(struct brcmf_if *ifp, u32 bw_cap[])
switch (mimo_bwcap) {
case WLC_N_BW_40ALL:
- bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
+ bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_40MHZ_BIT;
/* fall-thru */
case WLC_N_BW_20IN2G_40IN5G:
- bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
+ bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_40MHZ_BIT;
/* fall-thru */
case WLC_N_BW_20ALL:
- bw_cap[IEEE80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
- bw_cap[IEEE80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
+ bw_cap[NL80211_BAND_2GHZ] |= WLC_BW_20MHZ_BIT;
+ bw_cap[NL80211_BAND_5GHZ] |= WLC_BW_20MHZ_BIT;
break;
default:
brcmf_err("invalid mimo_bw_cap value\n");
@@ -5938,7 +6015,7 @@ static void brcmf_update_vht_cap(struct ieee80211_supported_band *band,
__le16 mcs_map;
/* not allowed in 2.4G band */
- if (band->band == IEEE80211_BAND_2GHZ)
+ if (band->band == NL80211_BAND_2GHZ)
return;
band->vht_cap.vht_supported = true;
@@ -5997,8 +6074,8 @@ static int brcmf_setup_wiphybands(struct wiphy *wiphy)
brcmf_get_bwcap(ifp, bw_cap);
}
brcmf_dbg(INFO, "nmode=%d, vhtmode=%d, bw_cap=(%d, %d)\n",
- nmode, vhtmode, bw_cap[IEEE80211_BAND_2GHZ],
- bw_cap[IEEE80211_BAND_5GHZ]);
+ nmode, vhtmode, bw_cap[NL80211_BAND_2GHZ],
+ bw_cap[NL80211_BAND_5GHZ]);
err = brcmf_fil_iovar_int_get(ifp, "rxchain", &rxchain);
if (err) {
@@ -6279,6 +6356,10 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
wiphy->n_cipher_suites = ARRAY_SIZE(brcmf_cipher_suites);
if (!brcmf_feat_is_enabled(ifp, BRCMF_FEAT_MFP))
wiphy->n_cipher_suites--;
+ wiphy->bss_select_support = BIT(NL80211_BSS_SELECT_ATTR_RSSI) |
+ BIT(NL80211_BSS_SELECT_ATTR_BAND_PREF) |
+ BIT(NL80211_BSS_SELECT_ATTR_RSSI_ADJUST);
+
wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
WIPHY_FLAG_OFFCHAN_TX |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -6321,7 +6402,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
}
band->n_channels = ARRAY_SIZE(__wl_2ghz_channels);
- wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ wiphy->bands[NL80211_BAND_2GHZ] = band;
}
if (bandlist[i] == cpu_to_le32(WLC_BAND_5G)) {
band = kmemdup(&__wl_band_5ghz, sizeof(__wl_band_5ghz),
@@ -6338,7 +6419,7 @@ static int brcmf_setup_wiphy(struct wiphy *wiphy, struct brcmf_if *ifp)
}
band->n_channels = ARRAY_SIZE(__wl_5ghz_channels);
- wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ wiphy->bands[NL80211_BAND_5GHZ] = band;
}
}
err = brcmf_setup_wiphybands(wiphy);
@@ -6604,13 +6685,13 @@ static void brcmf_free_wiphy(struct wiphy *wiphy)
kfree(wiphy->iface_combinations[i].limits);
}
kfree(wiphy->iface_combinations);
- if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
- kfree(wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
- kfree(wiphy->bands[IEEE80211_BAND_2GHZ]);
+ if (wiphy->bands[NL80211_BAND_2GHZ]) {
+ kfree(wiphy->bands[NL80211_BAND_2GHZ]->channels);
+ kfree(wiphy->bands[NL80211_BAND_2GHZ]);
}
- if (wiphy->bands[IEEE80211_BAND_5GHZ]) {
- kfree(wiphy->bands[IEEE80211_BAND_5GHZ]->channels);
- kfree(wiphy->bands[IEEE80211_BAND_5GHZ]);
+ if (wiphy->bands[NL80211_BAND_5GHZ]) {
+ kfree(wiphy->bands[NL80211_BAND_5GHZ]->channels);
+ kfree(wiphy->bands[NL80211_BAND_5GHZ]);
}
wiphy_free(wiphy);
}
@@ -6698,8 +6779,8 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
* cfg80211 here that we do and have it decide we can enable
* it. But first check if device does support 2G operation.
*/
- if (wiphy->bands[IEEE80211_BAND_2GHZ]) {
- cap = &wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap.cap;
+ if (wiphy->bands[NL80211_BAND_2GHZ]) {
+ cap = &wiphy->bands[NL80211_BAND_2GHZ]->ht_cap.cap;
*cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
err = wiphy_register(wiphy);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
index 0e8f2a079..d3fd6b1db 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c
@@ -1333,6 +1333,7 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
switch (pub->chip) {
case BRCM_CC_4354_CHIP_ID:
+ case BRCM_CC_4356_CHIP_ID:
/* explicitly check SR engine enable bit */
pmu_cc3_mask = BIT(2);
/* fall-through */
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
index 9e909e3c2..3e15d64c6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
@@ -38,7 +38,7 @@ const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
-/* boost value for RSSI_DELTA in preferred join selection */
+/* default boost value for RSSI_DELTA in preferred join selection */
#define BRCMF_JOIN_PREF_RSSI_BOOST 8
#define BRCMF_DEFAULT_TXGLOM_SIZE 32 /* max tx frames in glom chain */
@@ -83,11 +83,31 @@ MODULE_PARM_DESC(ignore_probe_fail, "always succeed probe for debugging");
static struct brcmfmac_platform_data *brcmfmac_pdata;
struct brcmf_mp_global_t brcmf_mp_global;
+void brcmf_c_set_joinpref_default(struct brcmf_if *ifp)
+{
+ struct brcmf_join_pref_params join_pref_params[2];
+ int err;
+
+ /* Setup join_pref to select target by RSSI (boost on 5GHz) */
+ join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
+ join_pref_params[0].len = 2;
+ join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
+ join_pref_params[0].band = WLC_BAND_5G;
+
+ join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
+ join_pref_params[1].len = 2;
+ join_pref_params[1].rssi_gain = 0;
+ join_pref_params[1].band = 0;
+ err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
+ sizeof(join_pref_params));
+ if (err)
+ brcmf_err("Set join_pref error (%d)\n", err);
+}
+
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
u8 buf[BRCMF_DCMD_SMLEN];
- struct brcmf_join_pref_params join_pref_params[2];
struct brcmf_rev_info_le revinfo;
struct brcmf_rev_info *ri;
char *ptr;
@@ -154,19 +174,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
goto done;
}
- /* Setup join_pref to select target by RSSI(with boost on 5GHz) */
- join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
- join_pref_params[0].len = 2;
- join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
- join_pref_params[0].band = WLC_BAND_5G;
- join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
- join_pref_params[1].len = 2;
- join_pref_params[1].rssi_gain = 0;
- join_pref_params[1].band = 0;
- err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
- sizeof(join_pref_params));
- if (err)
- brcmf_err("Set join_pref error (%d)\n", err);
+ brcmf_c_set_joinpref_default(ifp);
/* Setup event_msgs, enable E_IF */
err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
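The hunks above factor the join preference setup out of brcmf_c_preinit_dcmds() into brcmf_c_set_joinpref_default(), so later code can reset the firmware to the driver default. The "join_pref" iovar takes a flat array of 4-byte tuples; a minimal userspace sketch of how that buffer is filled (the type/band constant values below are placeholders, not the driver's actual defines):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors struct brcmf_join_pref_params: one byte each for type, len,
 * rssi_gain and band; the numeric values used here are illustrative. */
struct join_pref_entry {
	uint8_t type;      /* RSSI_DELTA entry first, plain RSSI second */
	uint8_t len;       /* payload length, 2 in both entries */
	uint8_t rssi_gain; /* dB boost applied to the matching band */
	uint8_t band;      /* 5G for the boost entry, 0 otherwise */
};

int main(void)
{
	struct join_pref_entry pref[2] = {
		{ .type = 11, .len = 2, .rssi_gain = 8, .band = 1 },
		{ .type = 1,  .len = 2, .rssi_gain = 0, .band = 0 },
	};
	uint8_t buf[sizeof(pref)];
	size_t i;

	memcpy(buf, pref, sizeof(pref)); /* what the iovar call sends */
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");
	return 0;
}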
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index ff825cd77..b590499f6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -40,19 +40,6 @@
#define MAX_WAIT_FOR_8021X_TX msecs_to_jiffies(950)
-/* AMPDU rx reordering definitions */
-#define BRCMF_RXREORDER_FLOWID_OFFSET 0
-#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
-#define BRCMF_RXREORDER_FLAGS_OFFSET 4
-#define BRCMF_RXREORDER_CURIDX_OFFSET 6
-#define BRCMF_RXREORDER_EXPIDX_OFFSET 8
-
-#define BRCMF_RXREORDER_DEL_FLOW 0x01
-#define BRCMF_RXREORDER_FLUSH_ALL 0x02
-#define BRCMF_RXREORDER_CURIDX_VALID 0x04
-#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
-#define BRCMF_RXREORDER_NEW_HOLE 0x10
-
#define BRCMF_BSSIDX_INVALID -1
char *brcmf_ifname(struct brcmf_if *ifp)
@@ -313,15 +300,9 @@ void brcmf_txflowblock(struct device *dev, bool state)
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
{
- skb->dev = ifp->ndev;
- skb->protocol = eth_type_trans(skb, skb->dev);
-
if (skb->pkt_type == PACKET_MULTICAST)
ifp->stats.multicast++;
- /* Process special event packets */
- brcmf_fweh_process_skb(ifp->drvr, skb);
-
if (!(ifp->ndev->flags & IFF_UP)) {
brcmu_pkt_buf_free_skb(skb);
return;
@@ -341,226 +322,60 @@ void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
netif_rx_ni(skb);
}
-static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
- u8 start, u8 end,
- struct sk_buff_head *skb_list)
+static int brcmf_rx_hdrpull(struct brcmf_pub *drvr, struct sk_buff *skb,
+ struct brcmf_if **ifp)
{
- /* initialize return list */
- __skb_queue_head_init(skb_list);
+ int ret;
- if (rfi->pend_pkts == 0) {
- brcmf_dbg(INFO, "no packets in reorder queue\n");
- return;
+ /* process and remove protocol-specific header */
+ ret = brcmf_proto_hdrpull(drvr, true, skb, ifp);
+
+ if (ret || !(*ifp) || !(*ifp)->ndev) {
+ if (ret != -ENODATA && *ifp)
+ (*ifp)->stats.rx_errors++;
+ brcmu_pkt_buf_free_skb(skb);
+ return -ENODATA;
}
- do {
- if (rfi->pktslots[start]) {
- __skb_queue_tail(skb_list, rfi->pktslots[start]);
- rfi->pktslots[start] = NULL;
- }
- start++;
- if (start > rfi->max_idx)
- start = 0;
- } while (start != end);
- rfi->pend_pkts -= skb_queue_len(skb_list);
+ skb->protocol = eth_type_trans(skb, (*ifp)->ndev);
+ return 0;
}
-static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
- struct sk_buff *pkt)
+void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
{
- u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
- struct brcmf_ampdu_rx_reorder *rfi;
- struct sk_buff_head reorder_list;
- struct sk_buff *pnext;
- u8 flags;
- u32 buf_size;
-
- flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
- flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
-
- /* validate flags and flow id */
- if (flags == 0xFF) {
- brcmf_err("invalid flags...so ignore this packet\n");
- brcmf_netif_rx(ifp, pkt);
- return;
- }
-
- rfi = ifp->drvr->reorder_flows[flow_id];
- if (flags & BRCMF_RXREORDER_DEL_FLOW) {
- brcmf_dbg(INFO, "flow-%d: delete\n",
- flow_id);
+ struct brcmf_if *ifp;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
- if (rfi == NULL) {
- brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
- flow_id);
- brcmf_netif_rx(ifp, pkt);
- return;
- }
+ brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
- brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
- &reorder_list);
- /* add the last packet */
- __skb_queue_tail(&reorder_list, pkt);
- kfree(rfi);
- ifp->drvr->reorder_flows[flow_id] = NULL;
- goto netif_rx;
- }
- /* from here on we need a flow reorder instance */
- if (rfi == NULL) {
- buf_size = sizeof(*rfi);
- max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
-
- buf_size += (max_idx + 1) * sizeof(pkt);
-
- /* allocate space for flow reorder info */
- brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
- flow_id, max_idx);
- rfi = kzalloc(buf_size, GFP_ATOMIC);
- if (rfi == NULL) {
- brcmf_err("failed to alloc buffer\n");
- brcmf_netif_rx(ifp, pkt);
- return;
- }
+ if (brcmf_rx_hdrpull(drvr, skb, &ifp))
+ return;
- ifp->drvr->reorder_flows[flow_id] = rfi;
- rfi->pktslots = (struct sk_buff **)(rfi+1);
- rfi->max_idx = max_idx;
- }
- if (flags & BRCMF_RXREORDER_NEW_HOLE) {
- if (rfi->pend_pkts) {
- brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
- rfi->exp_idx,
- &reorder_list);
- WARN_ON(rfi->pend_pkts);
- } else {
- __skb_queue_head_init(&reorder_list);
- }
- rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
- rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
- rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
- rfi->pktslots[rfi->cur_idx] = pkt;
- rfi->pend_pkts++;
- brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
- flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
- } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
- cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
- exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
-
- if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
- /* still in the current hole */
- /* enqueue the current on the buffer chain */
- if (rfi->pktslots[cur_idx] != NULL) {
- brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
- brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
- rfi->pktslots[cur_idx] = NULL;
- }
- rfi->pktslots[cur_idx] = pkt;
- rfi->pend_pkts++;
- rfi->cur_idx = cur_idx;
- brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
- flow_id, cur_idx, exp_idx, rfi->pend_pkts);
-
- /* can return now as there is no reorder
- * list to process.
- */
- return;
- }
- if (rfi->exp_idx == cur_idx) {
- if (rfi->pktslots[cur_idx] != NULL) {
- brcmf_dbg(INFO, "error buffer pending..free it\n");
- brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
- rfi->pktslots[cur_idx] = NULL;
- }
- rfi->pktslots[cur_idx] = pkt;
- rfi->pend_pkts++;
-
- /* got the expected one. flush from current to expected
- * and update expected
- */
- brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
- flow_id, cur_idx, exp_idx, rfi->pend_pkts);
-
- rfi->cur_idx = cur_idx;
- rfi->exp_idx = exp_idx;
-
- brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
- &reorder_list);
- brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
- flow_id, skb_queue_len(&reorder_list),
- rfi->pend_pkts);
- } else {
- u8 end_idx;
-
- brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
- flow_id, flags, rfi->cur_idx, rfi->exp_idx,
- cur_idx, exp_idx);
- if (flags & BRCMF_RXREORDER_FLUSH_ALL)
- end_idx = rfi->exp_idx;
- else
- end_idx = exp_idx;
-
- /* flush pkts first */
- brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
- &reorder_list);
-
- if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
- __skb_queue_tail(&reorder_list, pkt);
- } else {
- rfi->pktslots[cur_idx] = pkt;
- rfi->pend_pkts++;
- }
- rfi->exp_idx = exp_idx;
- rfi->cur_idx = cur_idx;
- }
+ if (brcmf_proto_is_reorder_skb(skb)) {
+ brcmf_proto_rxreorder(ifp, skb);
} else {
- /* explicity window move updating the expected index */
- exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
-
- brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
- flow_id, flags, rfi->exp_idx, exp_idx);
- if (flags & BRCMF_RXREORDER_FLUSH_ALL)
- end_idx = rfi->exp_idx;
- else
- end_idx = exp_idx;
+ /* Process special event packets */
+ if (handle_event)
+ brcmf_fweh_process_skb(ifp->drvr, skb);
- brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
- &reorder_list);
- __skb_queue_tail(&reorder_list, pkt);
- /* set the new expected idx */
- rfi->exp_idx = exp_idx;
- }
-netif_rx:
- skb_queue_walk_safe(&reorder_list, pkt, pnext) {
- __skb_unlink(pkt, &reorder_list);
- brcmf_netif_rx(ifp, pkt);
+ brcmf_netif_rx(ifp, skb);
}
}
-void brcmf_rx_frame(struct device *dev, struct sk_buff *skb)
+void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
{
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
- struct brcmf_skb_reorder_data *rd;
- int ret;
- brcmf_dbg(DATA, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
-
- /* process and remove protocol-specific header */
- ret = brcmf_proto_hdrpull(drvr, true, skb, &ifp);
+ brcmf_dbg(EVENT, "Enter: %s: rxp=%p\n", dev_name(dev), skb);
- if (ret || !ifp || !ifp->ndev) {
- if (ret != -ENODATA && ifp)
- ifp->stats.rx_errors++;
- brcmu_pkt_buf_free_skb(skb);
+ if (brcmf_rx_hdrpull(drvr, skb, &ifp))
return;
- }
- rd = (struct brcmf_skb_reorder_data *)skb->cb;
- if (rd->reorder)
- brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
- else
- brcmf_netif_rx(ifp, skb);
+ brcmf_fweh_process_skb(ifp->drvr, skb);
+ brcmu_pkt_buf_free_skb(skb);
}
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success)
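The core.c rework above splits the receive path: brcmf_rx_frame() keeps handling data packets, optionally scanning them for firmware events when the bus cannot separate the channels (as USB cannot), while the new brcmf_rx_event() consumes event-channel packets outright; both share brcmf_rx_hdrpull(). A compilable model of that control flow, with stand-in types rather than the driver's:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in for the skb plus the state the driver derives from it. */
struct pkt {
	bool bad_header;  /* would make the shared header pull fail */
	bool is_reorder;  /* AMPDU reorder metadata present in cb[] */
};

/* Shared step: protocol header pull; nonzero means packet was freed. */
static int hdrpull(const struct pkt *p)
{
	return p->bad_header ? -1 : 0;
}

static void rx_frame(const struct pkt *p, bool handle_event)
{
	if (hdrpull(p))
		return;
	if (p->is_reorder) {
		printf("hand off to protocol rxreorder hook\n");
	} else {
		if (handle_event)
			printf("scan packet for firmware events\n");
		printf("deliver via netif_rx\n");
	}
}

static void rx_event(const struct pkt *p)
{
	if (hdrpull(p))
		return;
	printf("process firmware event, then free packet\n");
}

int main(void)
{
	struct pkt data = { .bad_header = false, .is_reorder = false };
	struct pkt evt = { .bad_header = false, .is_reorder = false };

	rx_frame(&data, true); /* USB: events can hide in data stream */
	rx_event(&evt);        /* SDIO: dedicated event channel */
	return 0;
}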
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index 7bdb6fef9..647d3cc2a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -208,10 +208,6 @@ struct brcmf_if {
u8 ipv6addr_idx;
};
-struct brcmf_skb_reorder_data {
- u8 *reorder;
-};
-
int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
/* Return pointer to interface name */
@@ -227,6 +223,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
void brcmf_txfinalize(struct brcmf_if *ifp, struct sk_buff *txp, bool success);
void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb);
void brcmf_net_setcarrier(struct brcmf_if *ifp, bool on);
+void brcmf_c_set_joinpref_default(struct brcmf_if *ifp);
int __init brcmf_core_init(void);
void __exit brcmf_core_exit(void);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index d278c1117..875bf1356 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -29,6 +29,7 @@
#define BRCMF_FW_MAX_NVRAM_SIZE 64000
#define BRCMF_FW_NVRAM_DEVPATH_LEN 19 /* devpath0=pcie/1/4/ */
#define BRCMF_FW_NVRAM_PCIEDEV_LEN 10 /* pcie/1/4/ + \0 */
+#define BRCMF_FW_DEFAULT_BOARDREV "boardrev=0xff"
enum nvram_parser_state {
IDLE,
@@ -51,6 +52,7 @@ enum nvram_parser_state {
* @entry: start position of key,value entry.
* @multi_dev_v1: detect pcie multi device v1 (compressed).
* @multi_dev_v2: detect pcie multi device v2.
+ * @boardrev_found: nvram contains boardrev information.
*/
struct nvram_parser {
enum nvram_parser_state state;
@@ -63,6 +65,7 @@ struct nvram_parser {
u32 entry;
bool multi_dev_v1;
bool multi_dev_v2;
+ bool boardrev_found;
};
/**
@@ -125,6 +128,8 @@ static enum nvram_parser_state brcmf_nvram_handle_key(struct nvram_parser *nvp)
nvp->multi_dev_v1 = true;
if (strncmp(&nvp->data[nvp->entry], "pcie/", 5) == 0)
nvp->multi_dev_v2 = true;
+ if (strncmp(&nvp->data[nvp->entry], "boardrev", 8) == 0)
+ nvp->boardrev_found = true;
} else if (!is_nvram_char(c) || c == ' ') {
brcmf_dbg(INFO, "warning: ln=%d:col=%d: '=' expected, skip invalid key entry\n",
nvp->line, nvp->column);
@@ -284,6 +289,8 @@ static void brcmf_fw_strip_multi_v1(struct nvram_parser *nvp, u16 domain_nr,
while (i < nvp->nvram_len) {
if ((nvp->nvram[i] - '0' == id) && (nvp->nvram[i + 1] == ':')) {
i += 2;
+ if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0)
+ nvp->boardrev_found = true;
while (nvp->nvram[i] != 0) {
nvram[j] = nvp->nvram[i];
i++;
@@ -335,6 +342,8 @@ static void brcmf_fw_strip_multi_v2(struct nvram_parser *nvp, u16 domain_nr,
while (i < nvp->nvram_len - len) {
if (strncmp(&nvp->nvram[i], prefix, len) == 0) {
i += len;
+ if (strncmp(&nvp->nvram[i], "boardrev", 8) == 0)
+ nvp->boardrev_found = true;
while (nvp->nvram[i] != 0) {
nvram[j] = nvp->nvram[i];
i++;
@@ -356,6 +365,18 @@ fail:
nvp->nvram_len = 0;
}
+static void brcmf_fw_add_defaults(struct nvram_parser *nvp)
+{
+ if (nvp->boardrev_found)
+ return;
+
+ memcpy(&nvp->nvram[nvp->nvram_len], &BRCMF_FW_DEFAULT_BOARDREV,
+ strlen(BRCMF_FW_DEFAULT_BOARDREV));
+ nvp->nvram_len += strlen(BRCMF_FW_DEFAULT_BOARDREV);
+ nvp->nvram[nvp->nvram_len] = '\0';
+ nvp->nvram_len++;
+}
+
/* brcmf_nvram_strip: Takes a buffer of "<var>=<value>\n" lines read from a file
* and ending in a NUL. Removes carriage returns, empty lines, comment lines,
* and converts newlines to NULs. Shortens buffer as needed and pads with NULs.
@@ -377,16 +398,21 @@ static void *brcmf_fw_nvram_strip(const u8 *data, size_t data_len,
if (nvp.state == END)
break;
}
- if (nvp.multi_dev_v1)
+ if (nvp.multi_dev_v1) {
+ nvp.boardrev_found = false;
brcmf_fw_strip_multi_v1(&nvp, domain_nr, bus_nr);
- else if (nvp.multi_dev_v2)
+ } else if (nvp.multi_dev_v2) {
+ nvp.boardrev_found = false;
brcmf_fw_strip_multi_v2(&nvp, domain_nr, bus_nr);
+ }
if (nvp.nvram_len == 0) {
kfree(nvp.nvram);
return NULL;
}
+ brcmf_fw_add_defaults(&nvp);
+
pad = nvp.nvram_len;
*new_length = roundup(nvp.nvram_len + 1, 4);
while (pad != *new_length) {
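The firmware.c hunks above track whether any "boardrev" key was seen while parsing (the flag is reset before stripping multi-device sections, so only the selected device's entries count) and append a default "boardrev=0xff" entry otherwise. A small sketch of the append step on the NUL-separated entry format (buffer sizing is assumed generous here; the driver's allocation reserves the needed headroom):

#include <stdio.h>
#include <string.h>

#define DEFAULT_BOARDREV "boardrev=0xff"

/* Entries are NUL-separated "key=value" strings; if parsing never saw
 * a boardrev key, append the default entry and its terminating NUL. */
static size_t add_defaults(char *nvram, size_t len, int boardrev_found)
{
	if (boardrev_found)
		return len;
	memcpy(nvram + len, DEFAULT_BOARDREV, strlen(DEFAULT_BOARDREV));
	len += strlen(DEFAULT_BOARDREV);
	nvram[len++] = '\0';
	return len;
}

int main(void)
{
	char nvram[64] = "macaddr=00:11:22:33:44:55";
	size_t len = strlen(nvram) + 1; /* first entry plus its NUL */

	len = add_defaults(nvram, len, 0);
	printf("now %zu bytes; appended entry: %s\n",
	       len, nvram + strlen(nvram) + 1);
	return 0;
}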
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
index d414fbbcc..b39056125 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c
@@ -371,6 +371,7 @@ int brcmf_fweh_activate_events(struct brcmf_if *ifp)
int i, err;
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
+ memset(eventmask, 0, sizeof(eventmask));
for (i = 0; i < BRCMF_E_LAST; i++) {
if (ifp->drvr->fweh.evt_handler[i]) {
brcmf_dbg(EVENT, "enable event %s\n",
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
index 6b72df177..3a9a76dd9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h
@@ -78,6 +78,7 @@
#define BRCMF_C_SET_SCAN_CHANNEL_TIME 185
#define BRCMF_C_SET_SCAN_UNASSOC_TIME 187
#define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201
+#define BRCMF_C_SET_ASSOC_PREFER 205
#define BRCMF_C_GET_VALID_CHANNELS 217
#define BRCMF_C_GET_KEY_PRIMARY 235
#define BRCMF_C_SET_KEY_PRIMARY 236
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
index f82c9ab54..5b30922b6 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
@@ -92,6 +92,19 @@ enum brcmf_fws_tlv_len {
};
#undef BRCMF_FWS_TLV_DEF
+/* AMPDU rx reordering definitions */
+#define BRCMF_RXREORDER_FLOWID_OFFSET 0
+#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
+#define BRCMF_RXREORDER_FLAGS_OFFSET 4
+#define BRCMF_RXREORDER_CURIDX_OFFSET 6
+#define BRCMF_RXREORDER_EXPIDX_OFFSET 8
+
+#define BRCMF_RXREORDER_DEL_FLOW 0x01
+#define BRCMF_RXREORDER_FLUSH_ALL 0x02
+#define BRCMF_RXREORDER_CURIDX_VALID 0x04
+#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
+#define BRCMF_RXREORDER_NEW_HOLE 0x10
+
#ifdef DEBUG
/*
* brcmf_fws_tlv_names - array of tlv names.
@@ -1614,6 +1627,202 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
return 0;
}
+static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
+ u8 start, u8 end,
+ struct sk_buff_head *skb_list)
+{
+ /* initialize return list */
+ __skb_queue_head_init(skb_list);
+
+ if (rfi->pend_pkts == 0) {
+ brcmf_dbg(INFO, "no packets in reorder queue\n");
+ return;
+ }
+
+ do {
+ if (rfi->pktslots[start]) {
+ __skb_queue_tail(skb_list, rfi->pktslots[start]);
+ rfi->pktslots[start] = NULL;
+ }
+ start++;
+ if (start > rfi->max_idx)
+ start = 0;
+ } while (start != end);
+ rfi->pend_pkts -= skb_queue_len(skb_list);
+}
+
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
+{
+ u8 *reorder_data;
+ u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
+ struct brcmf_ampdu_rx_reorder *rfi;
+ struct sk_buff_head reorder_list;
+ struct sk_buff *pnext;
+ u8 flags;
+ u32 buf_size;
+
+ reorder_data = ((struct brcmf_skb_reorder_data *)pkt->cb)->reorder;
+ flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
+ flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
+
+ /* validate flags and flow id */
+ if (flags == 0xFF) {
+ brcmf_err("invalid flags...so ignore this packet\n");
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ rfi = ifp->drvr->reorder_flows[flow_id];
+ if (flags & BRCMF_RXREORDER_DEL_FLOW) {
+ brcmf_dbg(INFO, "flow-%d: delete\n",
+ flow_id);
+
+ if (rfi == NULL) {
+ brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
+ flow_id);
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
+ &reorder_list);
+ /* add the last packet */
+ __skb_queue_tail(&reorder_list, pkt);
+ kfree(rfi);
+ ifp->drvr->reorder_flows[flow_id] = NULL;
+ goto netif_rx;
+ }
+ /* from here on we need a flow reorder instance */
+ if (rfi == NULL) {
+ buf_size = sizeof(*rfi);
+ max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+
+ buf_size += (max_idx + 1) * sizeof(pkt);
+
+ /* allocate space for flow reorder info */
+ brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
+ flow_id, max_idx);
+ rfi = kzalloc(buf_size, GFP_ATOMIC);
+ if (rfi == NULL) {
+ brcmf_err("failed to alloc buffer\n");
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ ifp->drvr->reorder_flows[flow_id] = rfi;
+ rfi->pktslots = (struct sk_buff **)(rfi + 1);
+ rfi->max_idx = max_idx;
+ }
+ if (flags & BRCMF_RXREORDER_NEW_HOLE) {
+ if (rfi->pend_pkts) {
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
+ rfi->exp_idx,
+ &reorder_list);
+ WARN_ON(rfi->pend_pkts);
+ } else {
+ __skb_queue_head_init(&reorder_list);
+ }
+ rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+ rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+ rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+ rfi->pktslots[rfi->cur_idx] = pkt;
+ rfi->pend_pkts++;
+ brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
+ flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
+ } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
+ cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+ exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+ if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
+ /* still in the current hole */
+ /* enqueue the current on the buffer chain */
+ if (rfi->pktslots[cur_idx] != NULL) {
+ brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
+ brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+ rfi->pktslots[cur_idx] = NULL;
+ }
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+ rfi->cur_idx = cur_idx;
+ brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
+ flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+ /* can return now as there is no reorder
+ * list to process.
+ */
+ return;
+ }
+ if (rfi->exp_idx == cur_idx) {
+ if (rfi->pktslots[cur_idx] != NULL) {
+ brcmf_dbg(INFO, "error buffer pending..free it\n");
+ brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+ rfi->pktslots[cur_idx] = NULL;
+ }
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+
+ /* got the expected one. flush from current to expected
+ * and update expected
+ */
+ brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
+ flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+ rfi->cur_idx = cur_idx;
+ rfi->exp_idx = exp_idx;
+
+ brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
+ &reorder_list);
+ brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
+ flow_id, skb_queue_len(&reorder_list),
+ rfi->pend_pkts);
+ } else {
+ u8 end_idx;
+
+ brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
+ flow_id, flags, rfi->cur_idx, rfi->exp_idx,
+ cur_idx, exp_idx);
+ if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+ end_idx = rfi->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ /* flush pkts first */
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+ &reorder_list);
+
+ if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
+ __skb_queue_tail(&reorder_list, pkt);
+ } else {
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+ }
+ rfi->exp_idx = exp_idx;
+ rfi->cur_idx = cur_idx;
+ }
+ } else {
+ /* explicitly move the window, updating the expected index */
+ exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+ brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
+ flow_id, flags, rfi->exp_idx, exp_idx);
+ if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+ end_idx = rfi->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+ &reorder_list);
+ __skb_queue_tail(&reorder_list, pkt);
+ /* set the new expected idx */
+ rfi->exp_idx = exp_idx;
+ }
+netif_rx:
+ skb_queue_walk_safe(&reorder_list, pkt, pnext) {
+ __skb_unlink(pkt, &reorder_list);
+ brcmf_netif_rx(ifp, pkt);
+ }
+}
+
void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb)
{
struct brcmf_skb_reorder_data *rd;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
index a36bac17e..ef0ad8597 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.h
@@ -29,5 +29,6 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp);
void brcmf_fws_del_interface(struct brcmf_if *ifp);
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb);
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked);
+void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb);
#endif /* FWSIGNAL_H_ */
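The AMPDU reorder engine moves verbatim from core.c into fwsignal.c above (as brcmf_fws_rxreorder), since only the BCDC/fwsignal protocol produces reorder metadata. Its flush helper walks a circular slot window from start up to, but not including, end, wrapping at max_idx; because the loop is a do-while, start == end drains the entire ring. A toy model of that index walk (slot count is illustrative):

#include <stdio.h>

#define MAX_IDX 7 /* ring holds MAX_IDX + 1 slots */

/* Mirrors the loop shape of brcmf_rxreorder_get_skb_list(): deliver
 * occupied slots from start, wrap past MAX_IDX, stop at end. */
static void flush(int start, int end)
{
	do {
		printf("deliver slot %d (if occupied)\n", start);
		if (++start > MAX_IDX)
			start = 0;
	} while (start != end);
}

int main(void)
{
	flush(6, 2); /* wraps: slots 6, 7, 0, 1 */
	flush(3, 3); /* start == end: full ring drain */
	return 0;
}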
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
index 922966734..2b9a2bc42 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
@@ -526,6 +527,9 @@ static int brcmf_msgbuf_hdrpull(struct brcmf_pub *drvr, bool do_fws,
return -ENODEV;
}
+static void brcmf_msgbuf_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+}
static void
brcmf_msgbuf_remove_flowring(struct brcmf_msgbuf *msgbuf, u16 flowid)
@@ -1075,28 +1079,13 @@ static void brcmf_msgbuf_rxbuf_event_post(struct brcmf_msgbuf *msgbuf)
}
-static void
-brcmf_msgbuf_rx_skb(struct brcmf_msgbuf *msgbuf, struct sk_buff *skb,
- u8 ifidx)
-{
- struct brcmf_if *ifp;
-
- ifp = brcmf_get_ifp(msgbuf->drvr, ifidx);
- if (!ifp || !ifp->ndev) {
- brcmf_err("Received pkt for invalid ifidx %d\n", ifidx);
- brcmu_pkt_buf_free_skb(skb);
- return;
- }
- brcmf_netif_rx(ifp, skb);
-}
-
-
static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
{
struct msgbuf_rx_event *event;
u32 idx;
u16 buflen;
struct sk_buff *skb;
+ struct brcmf_if *ifp;
event = (struct msgbuf_rx_event *)buf;
idx = le32_to_cpu(event->msg.request_id);
@@ -1116,7 +1105,19 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
skb_trim(skb, buflen);
- brcmf_msgbuf_rx_skb(msgbuf, skb, event->msg.ifidx);
+ ifp = brcmf_get_ifp(msgbuf->drvr, event->msg.ifidx);
+ if (!ifp || !ifp->ndev) {
+ brcmf_err("Received pkt for invalid ifidx %d\n",
+ event->msg.ifidx);
+ goto exit;
+ }
+
+ skb->protocol = eth_type_trans(skb, ifp->ndev);
+
+ brcmf_fweh_process_skb(ifp->drvr, skb);
+
+exit:
+ brcmu_pkt_buf_free_skb(skb);
}
@@ -1128,6 +1129,7 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
u16 data_offset;
u16 buflen;
u32 idx;
+ struct brcmf_if *ifp;
brcmf_msgbuf_update_rxbufpost_count(msgbuf, 1);
@@ -1148,7 +1150,16 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
skb_trim(skb, buflen);
- brcmf_msgbuf_rx_skb(msgbuf, skb, rx_complete->msg.ifidx);
+ ifp = brcmf_get_ifp(msgbuf->drvr, rx_complete->msg.ifidx);
+ if (!ifp || !ifp->ndev) {
+ brcmf_err("Received pkt for invalid ifidx %d\n",
+ rx_complete->msg.ifidx);
+ brcmu_pkt_buf_free_skb(skb);
+ return;
+ }
+
+ skb->protocol = eth_type_trans(skb, ifp->ndev);
+ brcmf_netif_rx(ifp, skb);
}
@@ -1460,6 +1471,7 @@ int brcmf_proto_msgbuf_attach(struct brcmf_pub *drvr)
drvr->proto->configure_addr_mode = brcmf_msgbuf_configure_addr_mode;
drvr->proto->delete_peer = brcmf_msgbuf_delete_peer;
drvr->proto->add_tdls_peer = brcmf_msgbuf_add_tdls_peer;
+ drvr->proto->rxreorder = brcmf_msgbuf_rxreorder;
drvr->proto->pd = msgbuf;
init_waitqueue_head(&msgbuf->ioctl_resp_wait);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index b5a49e564..a70cda6c0 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -1266,7 +1266,7 @@ static void
brcmf_p2p_stop_wait_next_action_frame(struct brcmf_cfg80211_info *cfg)
{
struct brcmf_p2p_info *p2p = &cfg->p2p;
- struct brcmf_if *ifp = cfg->escan_info.ifp;
+ struct brcmf_if *ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp;
if (test_bit(BRCMF_P2P_STATUS_SENDING_ACT_FRAME, &p2p->status) &&
(test_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status) ||
@@ -1430,8 +1430,8 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
freq = ieee80211_channel_to_frequency(ch.chnum,
ch.band == BRCMU_CHAN_BAND_2G ?
- IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ);
wdev = &ifp->vif->wdev;
cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0);
@@ -1900,8 +1900,8 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
mgmt_frame_len = e->datalen - sizeof(*rxframe);
freq = ieee80211_channel_to_frequency(ch.chnum,
ch.band == BRCMU_CHAN_BAND_2G ?
- IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ);
+ NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ);
cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
index d55119d36..57531f421 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h
@@ -22,6 +22,9 @@ enum proto_addr_mode {
ADDR_DIRECT
};
+struct brcmf_skb_reorder_data {
+ u8 *reorder;
+};
struct brcmf_proto {
int (*hdrpull)(struct brcmf_pub *drvr, bool do_fws,
@@ -38,6 +41,7 @@ struct brcmf_proto {
u8 peer[ETH_ALEN]);
void (*add_tdls_peer)(struct brcmf_pub *drvr, int ifidx,
u8 peer[ETH_ALEN]);
+ void (*rxreorder)(struct brcmf_if *ifp, struct sk_buff *skb);
void *pd;
};
@@ -91,6 +95,18 @@ brcmf_proto_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
{
drvr->proto->add_tdls_peer(drvr, ifidx, peer);
}
+static inline bool brcmf_proto_is_reorder_skb(struct sk_buff *skb)
+{
+ struct brcmf_skb_reorder_data *rd;
+
+ rd = (struct brcmf_skb_reorder_data *)skb->cb;
+ return !!rd->reorder;
+}
+static inline void
+brcmf_proto_rxreorder(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+ ifp->drvr->proto->rxreorder(ifp, skb);
+}
#endif /* BRCMFMAC_PROTO_H */
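proto.h above gains the rxreorder protocol hook plus two inlines: brcmf_proto_is_reorder_skb() tests the reorder pointer stashed in skb->cb by brcmf_fws_hdrpull(), and brcmf_proto_rxreorder() dispatches through the ops table (fwsignal's real routine for BCDC, the empty brcmf_msgbuf_rxreorder() for msgbuf). A standalone model of that indirection, with simplified stand-in types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct skb {
	void *reorder; /* set during header pull when metadata exists */
};

struct proto {
	void (*rxreorder)(struct skb *skb);
};

static void fws_rxreorder(struct skb *skb)
{
	printf("reorder using metadata at %p\n", skb->reorder);
}

static void msgbuf_rxreorder(struct skb *skb)
{
	(void)skb; /* msgbuf never produces reorder metadata */
}

static bool is_reorder_skb(const struct skb *skb)
{
	return skb->reorder != NULL;
}

int main(void)
{
	struct proto bcdc = { .rxreorder = fws_rxreorder };
	struct proto msgbuf = { .rxreorder = msgbuf_rxreorder };
	struct skb skb = { 0 };

	skb.reorder = &skb; /* pretend header pull found metadata */
	if (is_reorder_skb(&skb))
		bcdc.rxreorder(&skb);
	msgbuf.rxreorder(&skb); /* harmless no-op on the other path */
	return 0;
}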
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index c5be36079..af4652047 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -535,9 +535,6 @@ static int qcount[NUMPRIO];
#define RETRYCHAN(chan) ((chan) == SDPCM_EVENT_CHANNEL)
-/* Retry count for register access failures */
-static const uint retry_limit = 2;
-
/* Limit on rounding up frames */
static const uint max_roundup = 512;
@@ -612,6 +609,7 @@ BRCMF_FW_NVRAM_DEF(4339, "/*(DEBLOBBED)*/", "brcmfmac4339-sdio.txt");
BRCMF_FW_NVRAM_DEF(43430, "/*(DEBLOBBED)*/", "brcmfmac43430-sdio.txt");
BRCMF_FW_NVRAM_DEF(43455, "/*(DEBLOBBED)*/", "brcmfmac43455-sdio.txt");
BRCMF_FW_NVRAM_DEF(4354, "/*(DEBLOBBED)*/", "brcmfmac4354-sdio.txt");
+BRCMF_FW_NVRAM_DEF(4356, "/*(DEBLOBBED)*/", "brcmfmac4356-sdio.txt");
static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143),
@@ -627,7 +625,8 @@ static struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = {
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4339_CHIP_ID, 0xFFFFFFFF, 4339),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_43430_CHIP_ID, 0xFFFFFFFF, 43430),
BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455),
- BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354)
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354),
+ BRCMF_FW_NVRAM_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356)
};
static void pkt_align(struct sk_buff *p, int len, int align)
@@ -1297,6 +1296,17 @@ static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
}
+static inline bool brcmf_sdio_fromevntchan(u8 *swheader)
+{
+ u32 hdrvalue;
+ u8 ret;
+
+ hdrvalue = *(u32 *)swheader;
+ ret = (u8)((hdrvalue & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT);
+
+ return (ret == SDPCM_EVENT_CHANNEL);
+}
+
static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
struct brcmf_sdio_hdrinfo *rd,
enum brcmf_sdio_frmtype type)
@@ -1644,7 +1654,11 @@ static u8 brcmf_sdio_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pfirst->len, pfirst->next,
pfirst->prev);
skb_unlink(pfirst, &bus->glom);
- brcmf_rx_frame(bus->sdiodev->dev, pfirst);
+ if (brcmf_sdio_fromevntchan(pfirst->data))
+ brcmf_rx_event(bus->sdiodev->dev, pfirst);
+ else
+ brcmf_rx_frame(bus->sdiodev->dev, pfirst,
+ false);
bus->sdcnt.rxglompkts++;
}
@@ -1970,18 +1984,19 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
__skb_trim(pkt, rd->len);
skb_pull(pkt, rd->dat_offset);
+ if (pkt->len == 0)
+ brcmu_pkt_buf_free_skb(pkt);
+ else if (rd->channel == SDPCM_EVENT_CHANNEL)
+ brcmf_rx_event(bus->sdiodev->dev, pkt);
+ else
+ brcmf_rx_frame(bus->sdiodev->dev, pkt,
+ false);
+
/* prepare the descriptor for the next read */
rd->len = rd->len_nxtfrm << 4;
rd->len_nxtfrm = 0;
/* treat all packets as events if we don't know */
rd->channel = SDPCM_EVENT_CHANNEL;
-
- if (pkt->len == 0) {
- brcmu_pkt_buf_free_skb(pkt);
- continue;
- }
-
- brcmf_rx_frame(bus->sdiodev->dev, pkt);
}
rxcount = maxframes - rxleft;
@@ -3261,7 +3276,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus,
const struct firmware *fw,
void *nvram, u32 nvlen)
{
- int bcmerror = -EFAULT;
+ int bcmerror;
u32 rstvec;
sdio_claim_host(bus->sdiodev->func[1]);
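The sdio.c changes above route event-channel packets to brcmf_rx_event(); brcmf_sdio_fromevntchan() reads the channel field out of the first 32-bit word of the SDPCM software header. A sketch of that extraction (mask, shift, and channel number below are placeholders, not the real SDPCM_* constants):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHANNEL_MASK  0x00000f00u /* placeholder, not SDPCM_CHANNEL_MASK */
#define CHANNEL_SHIFT 8           /* placeholder shift */
#define EVENT_CHANNEL 1           /* placeholder channel number */

static int from_event_channel(const uint8_t *swheader)
{
	uint32_t hdr;

	memcpy(&hdr, swheader, sizeof(hdr)); /* avoid unaligned access */
	return ((hdr & CHANNEL_MASK) >> CHANNEL_SHIFT) == EVENT_CHANNEL;
}

int main(void)
{
	uint32_t word = (uint32_t)EVENT_CHANNEL << CHANNEL_SHIFT;
	uint8_t hdr[4];

	memcpy(hdr, &word, sizeof(word));
	printf("event channel? %d\n", from_event_channel(hdr));
	return 0;
}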
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 7fb546547..16c9c8f81 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -514,7 +514,7 @@ static void brcmf_usb_rx_complete(struct urb *urb)
if (devinfo->bus_pub.state == BRCMFMAC_USB_STATE_UP) {
skb_put(skb, urb->actual_length);
- brcmf_rx_frame(devinfo->dev, skb);
+ brcmf_rx_frame(devinfo->dev, skb, true);
brcmf_usb_rx_refill(devinfo, req);
} else {
brcmu_pkt_buf_free_skb(skb);
@@ -1368,7 +1368,9 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
devinfo->ifnum = desc->bInterfaceNumber;
- if (usb->speed == USB_SPEED_SUPER)
+ if (usb->speed == USB_SPEED_SUPER_PLUS)
+ brcmf_dbg(USB, "Broadcom super speed plus USB WLAN interface detected\n");
+ else if (usb->speed == USB_SPEED_SUPER)
brcmf_dbg(USB, "Broadcom super speed USB WLAN interface detected\n");
else if (usb->speed == USB_SPEED_HIGH)
brcmf_dbg(USB, "Broadcom high speed USB WLAN interface detected\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index 38bd5890b..3a03287fa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -636,7 +636,7 @@ static void brcms_reg_apply_radar_flags(struct wiphy *wiphy)
struct ieee80211_channel *ch;
int i;
- sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
if (!sband)
return;
@@ -666,7 +666,7 @@ brcms_reg_apply_beaconing_flags(struct wiphy *wiphy,
const struct ieee80211_reg_rule *rule;
int band, i;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
sband = wiphy->bands[band];
if (!sband)
continue;
@@ -710,7 +710,7 @@ static void brcms_reg_notifier(struct wiphy *wiphy,
brcms_reg_apply_beaconing_flags(wiphy, request->initiator);
/* Disable radio if all channels disallowed by regulatory */
- for (band = 0; !ch_found && band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; !ch_found && band < NUM_NL80211_BANDS; band++) {
sband = wiphy->bands[band];
if (!sband)
continue;
@@ -755,9 +755,9 @@ void brcms_c_regd_init(struct brcms_c_info *wlc)
&sup_chan);
if (band_idx == BAND_2G_INDEX)
- sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
else
- sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 4ab06e1e6..d5fc5d3a1 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -49,7 +49,7 @@
FIF_PSPOLL)
#define CHAN2GHZ(channel, frequency, chflags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (frequency), \
.hw_value = (channel), \
.flags = chflags, \
@@ -58,7 +58,7 @@
}
#define CHAN5GHZ(channel, chflags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + 5*(channel), \
.hw_value = (channel), \
.flags = chflags, \
@@ -216,7 +216,7 @@ static struct ieee80211_rate legacy_ratetable[] = {
};
static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
- .band = IEEE80211_BAND_2GHZ,
+ .band = NL80211_BAND_2GHZ,
.channels = brcms_2ghz_chantable,
.n_channels = ARRAY_SIZE(brcms_2ghz_chantable),
.bitrates = legacy_ratetable,
@@ -237,7 +237,7 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
};
static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.channels = brcms_5ghz_nphy_chantable,
.n_channels = ARRAY_SIZE(brcms_5ghz_nphy_chantable),
.bitrates = legacy_ratetable + BRCMS_LEGACY_5G_RATE_OFFSET,
@@ -1025,8 +1025,8 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
int has_5g = 0;
u16 phy_type;
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
phy_type = brcms_c_get_phy_type(wl->wlc, 0);
if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
@@ -1037,7 +1037,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
band->ht_cap.mcs.rx_mask[1] = 0;
band->ht_cap.mcs.rx_highest = cpu_to_le16(72);
}
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
} else {
return -EPERM;
}
@@ -1048,7 +1048,7 @@ static int ieee_hw_rate_init(struct ieee80211_hw *hw)
if (phy_type == PHY_TYPE_N || phy_type == PHY_TYPE_LCN) {
band = &wlc->bandstate[BAND_5G_INDEX]->band;
*band = brcms_band_5GHz_nphy_template;
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
} else {
return -EPERM;
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 218cbc8bf..e16ee6063 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -7076,7 +7076,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
channel = BRCMS_CHAN_CHANNEL(rxh->RxChan);
rx_status->band =
- channel > 14 ? IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ channel > 14 ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
rx_status->freq =
ieee80211_channel_to_frequency(channel, rx_status->band);
@@ -7143,7 +7143,7 @@ prep_mac80211_status(struct brcms_c_info *wlc, struct d11rxhdr *rxh,
* a subset of the 2.4G rates. See bitrates field
* of brcms_band_5GHz_nphy (in mac80211_if.c).
*/
- if (rx_status->band == IEEE80211_BAND_5GHZ)
+ if (rx_status->band == NL80211_BAND_5GHZ)
rx_status->rate_idx -= BRCMS_LEGACY_5G_RATE_OFFSET;
/* Determine short preamble and rate_idx */
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index d2353f6e5..ca3cd2102 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -2026,7 +2026,7 @@ static int mpi_send_packet (struct net_device *dev)
} else {
*payloadLen = cpu_to_le16(len - sizeof(etherHead));
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* copy data into airo dma buffer */
memcpy(sendbuf, buffer, len);
@@ -2107,7 +2107,7 @@ static void airo_end_xmit(struct net_device *dev) {
i = 0;
if ( status == SUCCESS ) {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
for (; i < MAX_FIDS / 2 && (priv->fids[i] & 0xffff0000); i++);
} else {
priv->fids[fid] &= 0xffff;
@@ -2174,7 +2174,7 @@ static void airo_end_xmit11(struct net_device *dev) {
i = MAX_FIDS / 2;
if ( status == SUCCESS ) {
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
for (; i < MAX_FIDS && (priv->fids[i] & 0xffff0000); i++);
} else {
priv->fids[fid] &= 0xffff;
@@ -5794,7 +5794,7 @@ static int airo_set_freq(struct net_device *dev,
fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
- if((fwrq->m > 1000) || (fwrq->e > 0))
+ if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
rc = -EOPNOTSUPP;
else {
int channel = fwrq->m;
@@ -5836,7 +5836,7 @@ static int airo_get_freq(struct net_device *dev,
ch = le16_to_cpu(status_rid.channel);
if((ch > 0) && (ch < 15)) {
fwrq->m = 100000 *
- ieee80211_channel_to_frequency(ch, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(ch, NL80211_BAND_2GHZ);
fwrq->e = 1;
} else {
fwrq->m = ch;
@@ -6894,7 +6894,7 @@ static int airo_get_range(struct net_device *dev,
for(i = 0; i < 14; i++) {
range->freq[k].i = i + 1; /* List index */
range->freq[k].m = 100000 *
- ieee80211_channel_to_frequency(i + 1, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ);
range->freq[k++].e = 1; /* Values in MHz -> * 10^5 * 10 */
}
range->num_frequency = k;
@@ -7302,7 +7302,7 @@ static inline char *airo_translate_scan(struct net_device *dev,
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = le16_to_cpu(bss->dsChannel);
iwe.u.freq.m = 100000 *
- ieee80211_channel_to_frequency(iwe.u.freq.m, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(iwe.u.freq.m, NL80211_BAND_2GHZ);
iwe.u.freq.e = 1;
current_ev = iwe_stream_add_event(info, current_ev, end_buf,
&iwe, IW_EV_FREQ_LEN);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 8b5fec510..02a299a89 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -1913,7 +1913,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
if (geo->bg_channels) {
struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
- bg_band->band = IEEE80211_BAND_2GHZ;
+ bg_band->band = NL80211_BAND_2GHZ;
bg_band->n_channels = geo->bg_channels;
bg_band->channels = kcalloc(geo->bg_channels,
sizeof(struct ieee80211_channel),
@@ -1924,7 +1924,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
}
/* translate geo->bg to bg_band.channels */
for (i = 0; i < geo->bg_channels; i++) {
- bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+ bg_band->channels[i].band = NL80211_BAND_2GHZ;
bg_band->channels[i].center_freq = geo->bg[i].freq;
bg_band->channels[i].hw_value = geo->bg[i].channel;
bg_band->channels[i].max_power = geo->bg[i].max_power;
@@ -1945,7 +1945,7 @@ static int ipw2100_wdev_init(struct net_device *dev)
bg_band->bitrates = ipw2100_bg_rates;
bg_band->n_bitrates = RATE_COUNT;
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+ wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
}
wdev->wiphy->cipher_suites = ipw_cipher_suites;
@@ -2954,7 +2954,7 @@ static int __ipw2100_tx_process(struct ipw2100_priv *priv)
/* A packet was processed by the hardware, so update the
* watchdog */
- priv->net_dev->trans_start = jiffies;
+ netif_trans_update(priv->net_dev);
break;
@@ -3521,7 +3521,7 @@ static void ipw2100_msg_free(struct ipw2100_priv *priv)
static ssize_t show_pci(struct device *d, struct device_attribute *attr,
char *buf)
{
- struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev);
+ struct pci_dev *pci_dev = to_pci_dev(d);
char *out = buf;
int i, j;
u32 val;
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index 888eae128..9f02e7e78 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -7707,7 +7707,7 @@ static void ipw_handle_data_packet(struct ipw_priv *priv,
struct ipw_rx_packet *pkt = (struct ipw_rx_packet *)rxb->skb->data;
/* We received data from the HW, so stop the watchdog */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* We only process data packets if the
* interface is open */
@@ -7770,7 +7770,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
unsigned short len = le16_to_cpu(pkt->u.frame.length);
/* We received data from the HW, so stop the watchdog */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* We only process data packets if the
* interface is open */
@@ -7952,7 +7952,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
return;
/* We received data from the HW, so stop the watchdog */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (unlikely((len + IPW_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) {
dev->stats.rx_errors++;
@@ -11359,7 +11359,7 @@ static int ipw_wdev_init(struct net_device *dev)
if (geo->bg_channels) {
struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
- bg_band->band = IEEE80211_BAND_2GHZ;
+ bg_band->band = NL80211_BAND_2GHZ;
bg_band->n_channels = geo->bg_channels;
bg_band->channels = kcalloc(geo->bg_channels,
sizeof(struct ieee80211_channel),
@@ -11370,7 +11370,7 @@ static int ipw_wdev_init(struct net_device *dev)
}
/* translate geo->bg to bg_band.channels */
for (i = 0; i < geo->bg_channels; i++) {
- bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
+ bg_band->channels[i].band = NL80211_BAND_2GHZ;
bg_band->channels[i].center_freq = geo->bg[i].freq;
bg_band->channels[i].hw_value = geo->bg[i].channel;
bg_band->channels[i].max_power = geo->bg[i].max_power;
@@ -11391,14 +11391,14 @@ static int ipw_wdev_init(struct net_device *dev)
bg_band->bitrates = ipw2200_bg_rates;
bg_band->n_bitrates = ipw2200_num_bg_rates;
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
+ wdev->wiphy->bands[NL80211_BAND_2GHZ] = bg_band;
}
/* fill-out priv->ieee->a_band */
if (geo->a_channels) {
struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
- a_band->band = IEEE80211_BAND_5GHZ;
+ a_band->band = NL80211_BAND_5GHZ;
a_band->n_channels = geo->a_channels;
a_band->channels = kcalloc(geo->a_channels,
sizeof(struct ieee80211_channel),
@@ -11409,7 +11409,7 @@ static int ipw_wdev_init(struct net_device *dev)
}
/* translate geo->a to a_band.channels */
for (i = 0; i < geo->a_channels; i++) {
- a_band->channels[i].band = IEEE80211_BAND_5GHZ;
+ a_band->channels[i].band = NL80211_BAND_5GHZ;
a_band->channels[i].center_freq = geo->a[i].freq;
a_band->channels[i].hw_value = geo->a[i].channel;
a_band->channels[i].max_power = geo->a[i].max_power;
@@ -11430,7 +11430,7 @@ static int ipw_wdev_init(struct net_device *dev)
a_band->bitrates = ipw2200_a_rates;
a_band->n_bitrates = ipw2200_num_a_rates;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
+ wdev->wiphy->bands[NL80211_BAND_5GHZ] = a_band;
}
wdev->wiphy->cipher_suites = ipw_cipher_suites;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index e8b5545ca..260d6a3a4 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -1547,7 +1547,7 @@ il3945_irq_tasklet(struct il_priv *il)
}
static int
-il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
+il3945_get_channels_for_scan(struct il_priv *il, enum nl80211_band band,
u8 is_active, u8 n_probes,
struct il3945_scan_channel *scan_ch,
struct ieee80211_vif *vif)
@@ -1618,7 +1618,7 @@ il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
/* scan_pwr_info->tpc.dsp_atten; */
/*scan_pwr_info->tpc.tx_gain; */
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
else {
scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
@@ -2534,7 +2534,7 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
};
struct il3945_scan_cmd *scan;
u8 n_probes = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
bool is_active = false;
int ret;
u16 len;
@@ -2615,14 +2615,14 @@ il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
/* flags + rate selection */
switch (il->scan_band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
scan->tx_cmd.rate = RATE_1M_PLCP;
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
scan->tx_cmd.rate = RATE_6M_PLCP;
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
break;
default:
IL_WARN("Invalid scan band\n");
@@ -3507,7 +3507,7 @@ il3945_init_drv(struct il_priv *il)
il->ieee_channels = NULL;
il->ieee_rates = NULL;
- il->band = IEEE80211_BAND_2GHZ;
+ il->band = NL80211_BAND_2GHZ;
il->iw_mode = NL80211_IFTYPE_STATION;
il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
@@ -3582,13 +3582,13 @@ il3945_setup_mac(struct il_priv *il)
/* Default value; 4 EDCA QOS priorities */
hw->queues = 4;
- if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
- il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &il->bands[IEEE80211_BAND_2GHZ];
+ if (il->bands[NL80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &il->bands[NL80211_BAND_2GHZ];
- if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
- il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &il->bands[IEEE80211_BAND_5GHZ];
+ if (il->bands[NL80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &il->bands[NL80211_BAND_5GHZ];
il_leds_init(il);
@@ -3761,7 +3761,7 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_release_irq;
}
- il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5]);
+ il_set_rxon_channel(il, &il->bands[NL80211_BAND_2GHZ].channels[5]);
il3945_setup_deferred_work(il);
il3945_setup_handlers(il);
il_power_initialize(il);
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
index 76b0729ad..03ad9b8b5 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c
@@ -97,7 +97,7 @@ static struct il3945_tpt_entry il3945_tpt_table_g[] = {
#define RATE_RETRY_TH 15
static u8
-il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
+il3945_get_rate_idx_by_rssi(s32 rssi, enum nl80211_band band)
{
u32 idx = 0;
u32 table_size = 0;
@@ -107,11 +107,11 @@ il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
rssi = IL_MIN_RSSI_VAL;
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
tpt_table = il3945_tpt_table_g;
table_size = ARRAY_SIZE(il3945_tpt_table_g);
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
tpt_table = il3945_tpt_table_a;
table_size = ARRAY_SIZE(il3945_tpt_table_a);
break;
@@ -380,7 +380,7 @@ il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
/* For 5 GHz band it start at IL_FIRST_OFDM_RATE */
- if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (sband->band == NL80211_BAND_5GHZ) {
rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
}
@@ -541,7 +541,7 @@ il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
static u16
il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
u8 high = RATE_INVALID;
u8 low = RATE_INVALID;
@@ -549,7 +549,7 @@ il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
/* 802.11A walks to the next literal adjacent rate in
* the rate table */
- if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+ if (unlikely(band == NL80211_BAND_5GHZ)) {
int i;
u32 mask;
@@ -657,14 +657,14 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
/* get user max rate if set */
max_rate_idx = txrc->max_rate_idx;
- if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
+ if (sband->band == NL80211_BAND_5GHZ && max_rate_idx != -1)
max_rate_idx += IL_FIRST_OFDM_RATE;
if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
max_rate_idx = -1;
idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
- if (sband->band == IEEE80211_BAND_5GHZ)
+ if (sband->band == NL80211_BAND_5GHZ)
rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
spin_lock_irqsave(&rs_sta->lock, flags);
@@ -806,7 +806,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
out:
- if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (sband->band == NL80211_BAND_5GHZ) {
if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
idx = IL_FIRST_OFDM_RATE;
rs_sta->last_txrate_idx = idx;
@@ -935,7 +935,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
rs_sta->tgg = 0;
switch (il->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
/* TODO: this always does G, not a regression */
if (il->active.flags & RXON_FLG_TGG_PROTECT_MSK) {
rs_sta->tgg = 1;
@@ -943,7 +943,7 @@ il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
} else
rs_sta->expected_tpt = il3945_expected_tpt_g;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
rs_sta->expected_tpt = il3945_expected_tpt_a;
break;
default:
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index 93bdf684b..7bcedbb53 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -255,13 +255,13 @@ il3945_rs_next_rate(struct il_priv *il, int rate)
int next_rate = il3945_get_prev_ieee_rate(rate);
switch (il->band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
if (rate == RATE_12M_IDX)
next_rate = RATE_9M_IDX;
else if (rate == RATE_6M_IDX)
next_rate = RATE_6M_IDX;
break;
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
il_is_associated(il)) {
if (rate == RATE_11M_IDX)
@@ -349,7 +349,7 @@ il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
/* Fill the MRR chain with some info about on-chip retransmissions */
rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate_idx -= IL_FIRST_OFDM_RATE;
fail = tx_resp->failure_frame;
@@ -554,14 +554,14 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
rx_status.mactime = le64_to_cpu(rx_end->timestamp);
rx_status.band =
(rx_hdr->
- phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
+ phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
rx_status.band);
rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
- if (rx_status.band == IEEE80211_BAND_5GHZ)
+ if (rx_status.band == NL80211_BAND_5GHZ)
rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
rx_status.antenna =
@@ -1409,7 +1409,7 @@ il3945_send_tx_power(struct il_priv *il)
chan = le16_to_cpu(il->active.channel);
- txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+ txpower.band = (il->band == NL80211_BAND_5GHZ) ? 0 : 1;
ch_info = il_get_channel_info(il, il->band, chan);
if (!ch_info) {
IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
@@ -2310,7 +2310,7 @@ il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
(il->band ==
- IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP :
+ NL80211_BAND_5GHZ) ? RATE_6M_PLCP :
RATE_1M_PLCP);
il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
@@ -2343,7 +2343,7 @@ il3945_init_hw_rate_table(struct il_priv *il)
}
switch (il->band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
D_RATE("Select A mode rate scale\n");
/* If one of the following CCK rates is used,
* have it fall back to the 6M OFDM rate */
@@ -2359,7 +2359,7 @@ il3945_init_hw_rate_table(struct il_priv *il)
il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
break;
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
D_RATE("Select B/G mode rate scale\n");
/* If an OFDM rate is used, have it fall back to the
* 1M CCK rates */
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index baee8fe03..a27559854 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -457,7 +457,7 @@ il4965_rxq_stop(struct il_priv *il)
}
int
-il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
{
int idx = 0;
int band_offset = 0;
@@ -468,7 +468,7 @@ il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
return idx;
/* Legacy rate format, search for match in table */
} else {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
band_offset = IL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
@@ -688,8 +688,8 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
rx_status.band =
(phy_res->
- phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
+ phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
rx_status.band);
@@ -766,7 +766,7 @@ il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
static int
il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
- enum ieee80211_band band, u8 is_active,
+ enum nl80211_band band, u8 is_active,
u8 n_probes, struct il_scan_channel *scan_ch)
{
struct ieee80211_channel *chan;
@@ -822,7 +822,7 @@ il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
* power level:
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
*/
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -870,7 +870,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
u32 rate_flags = 0;
u16 cmd_len;
u16 rx_chain = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
u8 n_probes = 0;
u8 rx_ant = il->hw_params.valid_rx_ant;
u8 rate;
@@ -944,7 +944,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
switch (il->scan_band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
chan_mod =
le32_to_cpu(il->active.flags & RXON_FLG_CHANNEL_MODE_MSK) >>
@@ -956,7 +956,7 @@ il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
rate_flags = RATE_MCS_CCK_MSK;
}
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
rate = RATE_6M_PLCP;
break;
default:
@@ -1590,7 +1590,7 @@ il4965_tx_cmd_build_rate(struct il_priv *il,
|| rate_idx > RATE_COUNT_LEGACY)
rate_idx = rate_lowest_index(&il->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate_idx += IL_FIRST_OFDM_RATE;
/* Get PLCP rate for tx_cmd->rate_n_flags */
rate_plcp = il_rates[rate_idx].plcp;
@@ -3051,7 +3051,7 @@ il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
}
/* Set up the rate scaling to start at selected rate, fall back
* all the way down to 1M in IEEE order, and then spin on 1M */
- if (il->band == IEEE80211_BAND_5GHZ)
+ if (il->band == NL80211_BAND_5GHZ)
r = RATE_6M_IDX;
else
r = RATE_1M_IDX;
@@ -5553,6 +5553,7 @@ __il4965_up(struct il_priv *il)
il4965_prepare_card_hw(il);
if (!il->hw_ready) {
+ il_dealloc_bcast_stations(il);
IL_ERR("HW not ready\n");
return -EIO;
}
@@ -5564,6 +5565,7 @@ __il4965_up(struct il_priv *il)
set_bit(S_RFKILL, &il->status);
wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
+ il_dealloc_bcast_stations(il);
il_enable_rfkill_int(il);
IL_WARN("Radio disabled by HW RF Kill switch\n");
return 0;
@@ -5577,6 +5579,7 @@ __il4965_up(struct il_priv *il)
ret = il4965_hw_nic_init(il);
if (ret) {
IL_ERR("Unable to init nic\n");
+ il_dealloc_bcast_stations(il);
return ret;
}
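
The three `__il4965_up()` hunks above all serve one fix: each early-exit error path now releases the broadcast station allocated earlier in the function instead of leaking it. A compilable sketch of the idiom, with hypothetical helpers (`setup_bcast()`, `teardown_bcast()`) standing in for the driver's allocation and `il_dealloc_bcast_stations()`:

#include <errno.h>

/* Hypothetical stand-ins for the driver's allocation and teardown. */
static int  setup_bcast(void)    { return 0; }
static void teardown_bcast(void) { }
static int  hw_ready(void)       { return 1; }
static int  nic_init(void)       { return 0; }

static int device_up(void)
{
        int ret = setup_bcast();

        if (ret)
                return ret;

        if (!hw_ready()) {
                teardown_bcast();        /* undo on every failure path */
                return -EIO;
        }

        ret = nic_init();
        if (ret) {
                teardown_bcast();        /* ...including this one */
                return ret;
        }
        return 0;
}
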
@@ -5787,12 +5790,12 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
- if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
- il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &il->bands[IEEE80211_BAND_2GHZ];
- if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
- il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &il->bands[IEEE80211_BAND_5GHZ];
+ if (il->bands[NL80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &il->bands[NL80211_BAND_2GHZ];
+ if (il->bands[NL80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &il->bands[NL80211_BAND_5GHZ];
il_leds_init(il);
@@ -6365,7 +6368,7 @@ il4965_init_drv(struct il_priv *il)
il->ieee_channels = NULL;
il->ieee_rates = NULL;
- il->band = IEEE80211_BAND_2GHZ;
+ il->band = NL80211_BAND_2GHZ;
il->iw_mode = NL80211_IFTYPE_STATION;
il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
@@ -6477,7 +6480,7 @@ il4965_set_hw_params(struct il_priv *il)
il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
- il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+ il->hw_params.ht40_channel = BIT(NL80211_BAND_5GHZ);
il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-rs.c b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
index bac60b2bc..a867ae7f4 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-rs.c
@@ -549,7 +549,7 @@ il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
*/
static int
il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct il_scale_tbl_info *tbl, int *rate_idx)
{
u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
@@ -574,7 +574,7 @@ il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
/* legacy rate format */
if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
if (il4965_num_of_ant == 1) {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
tbl->lq_type = LQ_A;
else
tbl->lq_type = LQ_G;
@@ -743,7 +743,7 @@ il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
switch_to_legacy = 1;
scale_idx = rs_ht_to_legacy[scale_idx];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
tbl->lq_type = LQ_A;
else
tbl->lq_type = LQ_G;
@@ -762,7 +762,7 @@ il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
/* Mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
/* supp_rates has no CCK bits in A mode */
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
rate_mask =
(u16) (rate_mask &
(lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
@@ -851,7 +851,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
table = &lq_sta->lq;
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
- if (il->band == IEEE80211_BAND_5GHZ)
+ if (il->band == NL80211_BAND_5GHZ)
rs_idx -= IL_FIRST_OFDM_RATE;
mac_flags = info->status.rates[0].flags;
mac_idx = info->status.rates[0].idx;
@@ -864,7 +864,7 @@ il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
* mac80211 HT idx is always zero-idxed; we need to move
* HT OFDM rates after CCK rates in 2.4 GHz band
*/
- if (il->band == IEEE80211_BAND_2GHZ)
+ if (il->band == NL80211_BAND_2GHZ)
mac_idx += IL_FIRST_OFDM_RATE;
}
/* Here we actually compare this rate to the latest LQ command */
@@ -1816,7 +1816,7 @@ il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
/* mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
/* supp_rates has no CCK bits in A mode */
rate_scale_idx_msk =
(u16) (rate_mask &
@@ -2212,7 +2212,7 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
/* Get max rate if user set max rate */
if (lq_sta) {
lq_sta->max_rate_idx = txrc->max_rate_idx;
- if (sband->band == IEEE80211_BAND_5GHZ &&
+ if (sband->band == NL80211_BAND_5GHZ &&
lq_sta->max_rate_idx != -1)
lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
if (lq_sta->max_rate_idx < 0 ||
@@ -2258,11 +2258,11 @@ il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
} else {
/* Check for invalid rates */
if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
- (sband->band == IEEE80211_BAND_5GHZ &&
+ (sband->band == NL80211_BAND_5GHZ &&
rate_idx < IL_FIRST_OFDM_RATE))
rate_idx = rate_lowest_index(sband, sta);
/* On valid 5 GHz rate, adjust idx */
- else if (sband->band == IEEE80211_BAND_5GHZ)
+ else if (sband->band == NL80211_BAND_5GHZ)
rate_idx -= IL_FIRST_OFDM_RATE;
info->control.rates[0].flags = 0;
}
@@ -2362,7 +2362,7 @@ il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
/* Set last_txrate_idx to lowest rate */
lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
- if (sband->band == IEEE80211_BAND_5GHZ)
+ if (sband->band == NL80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.c b/drivers/net/wireless/intel/iwlegacy/4965.c
index dd2c478bd..b353010d6 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965.c
@@ -1267,7 +1267,7 @@ il4965_send_tx_power(struct il_priv *il)
"TX Power requested while scanning!\n"))
return -EAGAIN;
- band = il->band == IEEE80211_BAND_2GHZ;
+ band = il->band == NL80211_BAND_2GHZ;
is_ht40 = iw4965_is_ht40_channel(il->active.flags);
@@ -1480,7 +1480,7 @@ il4965_hw_channel_switch(struct il_priv *il,
u8 switch_count;
u16 beacon_interval = le16_to_cpu(il->timing.beacon_interval);
struct ieee80211_vif *vif = il->vif;
- band = (il->band == IEEE80211_BAND_2GHZ);
+ band = (il->band == NL80211_BAND_2GHZ);
if (WARN_ON_ONCE(vif == NULL))
return -EIO;
@@ -1918,7 +1918,7 @@ struct il_cfg il4965_cfg = {
* Force use of chains B and C for scan RX on 5 GHz band
* because the device has off-channel reception on chain A.
*/
- .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+ .scan_rx_antennas[NL80211_BAND_5GHZ] = ANT_BC,
.eeprom_size = IL4965_EEPROM_IMG_SIZE,
.num_of_queues = IL49_NUM_QUEUES,
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h
index e432715e0..527e8b531 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.h
+++ b/drivers/net/wireless/intel/iwlegacy/4965.h
@@ -68,7 +68,7 @@ void il4965_rx_replenish(struct il_priv *il);
void il4965_rx_replenish_now(struct il_priv *il);
void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
int il4965_rxq_stop(struct il_priv *il);
-int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
void il4965_rx_handle(struct il_priv *il);
/* tx */
diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c
index eb5cb603b..eb24b9241 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.c
+++ b/drivers/net/wireless/intel/iwlegacy/common.c
@@ -723,10 +723,9 @@ il_eeprom_init(struct il_priv *il)
sz = il->cfg->eeprom_size;
D_EEPROM("NVM size = %d\n", sz);
il->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!il->eeprom) {
- ret = -ENOMEM;
- goto alloc_err;
- }
+ if (!il->eeprom)
+ return -ENOMEM;
+
e = (__le16 *) il->eeprom;
il->ops->apm_init(il);
@@ -778,7 +777,6 @@ err:
il_eeprom_free(il);
/* Reset chip to save power until we load uCode during "up". */
il_apm_stop(il);
-alloc_err:
return ret;
}
EXPORT_SYMBOL(il_eeprom_init);
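
The `il_eeprom_init()` change is pure simplification: when the very first allocation fails there is nothing to unwind, so the function returns `-ENOMEM` directly and the now-unreachable `alloc_err:` label goes away. A userspace sketch of the resulting shape, with a hypothetical `read_nvm()`:

#include <errno.h>
#include <stdlib.h>

static int read_nvm(void *buf) { (void)buf; return 0; }  /* hypothetical */

static int eeprom_init(size_t sz)
{
        void *eeprom = calloc(1, sz);
        int ret;

        if (!eeprom)
                return -ENOMEM;  /* first allocation: nothing to unwind */

        ret = read_nvm(eeprom);
        if (ret)
                free(eeprom);    /* later failures still clean up */
        return ret;
}
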
@@ -862,7 +860,7 @@ il_init_band_reference(const struct il_priv *il, int eep_band,
* Does not set up a command, or touch hardware.
*/
static int
-il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
+il_mod_ht40_chan_info(struct il_priv *il, enum nl80211_band band, u16 channel,
const struct il_eeprom_channel *eeprom_ch,
u8 clear_ht40_extension_channel)
{
@@ -947,7 +945,7 @@ il_init_channel_map(struct il_priv *il)
ch_info->channel = eeprom_ch_idx[ch];
ch_info->band =
(band ==
- 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ 1) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
/* permanently store EEPROM's channel regulatory flags
* and max power in channel info database. */
@@ -1005,14 +1003,14 @@ il_init_channel_map(struct il_priv *il)
/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
for (band = 6; band <= 7; band++) {
- enum ieee80211_band ieeeband;
+ enum nl80211_band ieeeband;
il_init_band_reference(il, band, &eeprom_ch_count,
&eeprom_ch_info, &eeprom_ch_idx);
/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
ieeeband =
- (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ (band == 6) ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
/* Loop through each band adding each of the channels */
for (ch = 0; ch < eeprom_ch_count; ch++) {
@@ -1050,19 +1048,19 @@ EXPORT_SYMBOL(il_free_channel_map);
* Based on band and channel number.
*/
const struct il_channel_info *
-il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
+il_get_channel_info(const struct il_priv *il, enum nl80211_band band,
u16 channel)
{
int i;
switch (band) {
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
for (i = 14; i < il->channel_count; i++) {
if (il->channel_info[i].channel == channel)
return &il->channel_info[i];
}
break;
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
if (channel >= 1 && channel <= 14)
return &il->channel_info[channel - 1];
break;
@@ -1459,7 +1457,7 @@ il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
clear_bit(S_SCAN_HW, &il->status);
D_SCAN("Scan on %sGHz took %dms\n",
- (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+ (il->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
jiffies_to_msecs(jiffies - il->scan_start));
queue_work(il->workqueue, &il->scan_completed);
@@ -1477,10 +1475,10 @@ il_setup_rx_scan_handlers(struct il_priv *il)
EXPORT_SYMBOL(il_setup_rx_scan_handlers);
u16
-il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
u8 n_probes)
{
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
return IL_ACTIVE_DWELL_TIME_52 +
IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
else
@@ -1490,14 +1488,14 @@ il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
EXPORT_SYMBOL(il_get_active_dwell_time);
u16
-il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
struct ieee80211_vif *vif)
{
u16 value;
u16 passive =
(band ==
- IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
+ NL80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
IL_PASSIVE_DWELL_TIME_52;
@@ -1522,10 +1520,10 @@ void
il_init_scan_params(struct il_priv *il)
{
u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
- if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
- il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
- if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
- il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+ if (!il->scan_tx_ant[NL80211_BAND_5GHZ])
+ il->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+ if (!il->scan_tx_ant[NL80211_BAND_2GHZ])
+ il->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
}
EXPORT_SYMBOL(il_init_scan_params);
@@ -2005,7 +2003,7 @@ il_prep_station(struct il_priv *il, const u8 *addr, bool is_ap,
il_set_ht_add_station(il, sta_id, sta);
/* 3945 only */
- rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+ rate = (il->band == NL80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
/* Turn on both antennas for the station... */
station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
@@ -2794,8 +2792,10 @@ il_tx_queue_free(struct il_priv *il, int txq_id)
il_tx_queue_unmap(il, txq_id);
/* De-alloc array of command/tx buffers */
- for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
+ if (txq->cmd) {
+ for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+ }
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
@@ -2873,8 +2873,10 @@ il_cmd_queue_free(struct il_priv *il)
il_cmd_queue_unmap(il);
/* De-alloc array of command/tx buffers */
- for (i = 0; i <= TFD_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
+ if (txq->cmd) {
+ for (i = 0; i <= TFD_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+ }
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
@@ -3080,7 +3082,9 @@ err:
kfree(txq->cmd[i]);
out_free_arrays:
kfree(txq->meta);
+ txq->meta = NULL;
kfree(txq->cmd);
+ txq->cmd = NULL;
return -ENOMEM;
}
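
The three hunks just above form one defensive pattern: the teardown loops dereference `txq->cmd` only if it was actually allocated, and the init error path NULLs `txq->meta` and `txq->cmd` after freeing them, so a later pass through the teardown helpers cannot double-free. A standalone sketch with invented types:

#include <stdlib.h>

#define SLOTS 8

struct queue {
        void **cmd;   /* NULL until allocated, NULL again after free */
        void  *meta;
};

static void queue_free(struct queue *q)
{
        int i;

        if (q->cmd) {                    /* guard: init may have failed early */
                for (i = 0; i < SLOTS; i++)
                        free(q->cmd[i]);
        }
        free(q->cmd);
        free(q->meta);
        q->cmd = NULL;                   /* repeated teardown is now harmless */
        q->meta = NULL;
}
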
@@ -3378,7 +3382,7 @@ EXPORT_SYMBOL(il_bcast_addr);
static void
il_init_ht_hw_capab(const struct il_priv *il,
struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
u16 max_bit_rate = 0;
u8 rx_chains_num = il->hw_params.rx_chains_num;
@@ -3439,8 +3443,8 @@ il_init_geos(struct il_priv *il)
int i = 0;
s8 max_tx_power = 0;
- if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
- il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+ if (il->bands[NL80211_BAND_2GHZ].n_bitrates ||
+ il->bands[NL80211_BAND_5GHZ].n_bitrates) {
D_INFO("Geography modes already initialized.\n");
set_bit(S_GEO_CONFIGURED, &il->status);
return 0;
@@ -3461,23 +3465,23 @@ il_init_geos(struct il_priv *il)
}
/* 5.2GHz channels start after the 2.4GHz channels */
- sband = &il->bands[IEEE80211_BAND_5GHZ];
+ sband = &il->bands[NL80211_BAND_5GHZ];
sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
/* just OFDM */
sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
if (il->cfg->sku & IL_SKU_N)
- il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+ il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_5GHZ);
- sband = &il->bands[IEEE80211_BAND_2GHZ];
+ sband = &il->bands[NL80211_BAND_2GHZ];
sband->channels = channels;
/* OFDM & CCK */
sband->bitrates = rates;
sband->n_bitrates = RATE_COUNT_LEGACY;
if (il->cfg->sku & IL_SKU_N)
- il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+ il_init_ht_hw_capab(il, &sband->ht_cap, NL80211_BAND_2GHZ);
il->ieee_channels = channels;
il->ieee_rates = rates;
@@ -3528,7 +3532,7 @@ il_init_geos(struct il_priv *il)
il->tx_power_user_lmt = max_tx_power;
il->tx_power_next = max_tx_power;
- if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+ if (il->bands[NL80211_BAND_5GHZ].n_channels == 0 &&
(il->cfg->sku & IL_SKU_A)) {
IL_INFO("Incorrectly detected BG card as ABG. "
"Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
@@ -3537,8 +3541,8 @@ il_init_geos(struct il_priv *il)
}
IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
- il->bands[IEEE80211_BAND_2GHZ].n_channels,
- il->bands[IEEE80211_BAND_5GHZ].n_channels);
+ il->bands[NL80211_BAND_2GHZ].n_channels,
+ il->bands[NL80211_BAND_5GHZ].n_channels);
set_bit(S_GEO_CONFIGURED, &il->status);
@@ -3559,7 +3563,7 @@ il_free_geos(struct il_priv *il)
EXPORT_SYMBOL(il_free_geos);
static bool
-il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+il_is_channel_extension(struct il_priv *il, enum nl80211_band band,
u16 channel, u8 extension_chan_offset)
{
const struct il_channel_info *ch_info;
@@ -3922,14 +3926,14 @@ EXPORT_SYMBOL(il_set_rxon_ht);
/* Return valid, unused, channel for a passive scan to reset the RF */
u8
-il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+il_get_single_channel_number(struct il_priv *il, enum nl80211_band band)
{
const struct il_channel_info *ch_info;
int i;
u8 channel = 0;
u8 min, max;
- if (band == IEEE80211_BAND_5GHZ) {
+ if (band == NL80211_BAND_5GHZ) {
min = 14;
max = il->channel_count;
} else {
@@ -3961,14 +3965,14 @@ EXPORT_SYMBOL(il_get_single_channel_number);
int
il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
{
- enum ieee80211_band band = ch->band;
+ enum nl80211_band band = ch->band;
u16 channel = ch->hw_value;
if (le16_to_cpu(il->staging.channel) == channel && il->band == band)
return 0;
il->staging.channel = cpu_to_le16(channel);
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
il->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
else
il->staging.flags |= RXON_FLG_BAND_24G_MSK;
@@ -3982,10 +3986,10 @@ il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch)
EXPORT_SYMBOL(il_set_rxon_channel);
void
-il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
struct ieee80211_vif *vif)
{
- if (band == IEEE80211_BAND_5GHZ) {
+ if (band == NL80211_BAND_5GHZ) {
il->staging.flags &=
~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
RXON_FLG_CCK_MSK);
@@ -5411,7 +5415,7 @@ il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (changes & BSS_CHANGED_ERP_CTS_PROT) {
D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+ if (bss_conf->use_cts_prot && il->band != NL80211_BAND_5GHZ)
il->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
else
il->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
diff --git a/drivers/net/wireless/intel/iwlegacy/common.h b/drivers/net/wireless/intel/iwlegacy/common.h
index ce52cf114..726ede391 100644
--- a/drivers/net/wireless/intel/iwlegacy/common.h
+++ b/drivers/net/wireless/intel/iwlegacy/common.h
@@ -432,7 +432,7 @@ u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
int il_init_channel_map(struct il_priv *il);
void il_free_channel_map(struct il_priv *il);
const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
- enum ieee80211_band band,
+ enum nl80211_band band,
u16 channel);
#define IL_NUM_SCAN_RATES (2)
@@ -497,7 +497,7 @@ struct il_channel_info {
u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */
u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */
- enum ieee80211_band band;
+ enum nl80211_band band;
/* HT40 channel info */
s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
@@ -811,7 +811,7 @@ struct il_sensitivity_ranges {
* @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
* @max_stations:
* @ht40_channel: is 40MHz width possible in band 2.4
- * BIT(IEEE80211_BAND_5GHZ) BIT(IEEE80211_BAND_5GHZ)
+ * BIT(NL80211_BAND_5GHZ) BIT(NL80211_BAND_5GHZ)
* @sw_crypto: 0 for hw, 1 for sw
* @max_xxx_size: for ucode uses
* @ct_kill_threshold: temperature threshold
@@ -1141,13 +1141,13 @@ struct il_priv {
struct list_head free_frames;
int frames_count;
- enum ieee80211_band band;
+ enum nl80211_band band;
int alloc_rxb_page;
void (*handlers[IL_CN_MAX]) (struct il_priv *il,
struct il_rx_buf *rxb);
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
/* spectrum measurement report caching */
struct il_spectrum_notification measure_report;
@@ -1176,10 +1176,10 @@ struct il_priv {
unsigned long scan_start;
unsigned long scan_start_tsf;
void *scan_cmd;
- enum ieee80211_band scan_band;
+ enum nl80211_band scan_band;
struct cfg80211_scan_request *scan_request;
struct ieee80211_vif *scan_vif;
- u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 scan_tx_ant[NUM_NL80211_BANDS];
u8 mgmt_tx_ant;
/* spinlock */
@@ -1479,7 +1479,7 @@ il_is_channel_radar(const struct il_channel_info *ch_info)
static inline u8
il_is_channel_a_band(const struct il_channel_info *ch_info)
{
- return ch_info->band == IEEE80211_BAND_5GHZ;
+ return ch_info->band == NL80211_BAND_5GHZ;
}
static inline int
@@ -1673,7 +1673,7 @@ struct il_cfg {
/* params not likely to change within a device family */
struct il_base_params *base_params;
/* params likely to change within a device family */
- u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ u8 scan_rx_antennas[NUM_NL80211_BANDS];
enum il_led_mode led_mode;
int eeprom_size;
@@ -1707,9 +1707,9 @@ void il_set_rxon_hwcrypto(struct il_priv *il, int hw_decrypt);
int il_check_rxon_cmd(struct il_priv *il);
int il_full_rxon_required(struct il_priv *il);
int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch);
-void il_set_flags_for_band(struct il_priv *il, enum ieee80211_band band,
+void il_set_flags_for_band(struct il_priv *il, enum nl80211_band band,
struct ieee80211_vif *vif);
-u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+u8 il_get_single_channel_number(struct il_priv *il, enum nl80211_band band);
void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
bool il_is_ht40_tx_allowed(struct il_priv *il,
struct ieee80211_sta_ht_cap *ht_cap);
@@ -1793,9 +1793,9 @@ int il_force_reset(struct il_priv *il, bool external);
u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
const u8 *ta, const u8 *ie, int ie_len, int left);
void il_setup_rx_scan_handlers(struct il_priv *il);
-u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+u16 il_get_active_dwell_time(struct il_priv *il, enum nl80211_band band,
u8 n_probes);
-u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+u16 il_get_passive_dwell_time(struct il_priv *il, enum nl80211_band band,
struct ieee80211_vif *vif);
void il_setup_scan_deferred_work(struct il_priv *il);
void il_cancel_scan_deferred_work(struct il_priv *il);
@@ -1955,7 +1955,7 @@ il_commit_rxon(struct il_priv *il)
}
static inline const struct ieee80211_supported_band *
-il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+il_get_hw_mode(struct il_priv *il, enum nl80211_band band)
{
return il->hw->wiphy->bands[band];
}
@@ -2813,7 +2813,7 @@ struct il_lq_sta {
u8 action_counter; /* # mode-switch actions tried */
u8 is_green;
u8 is_dup;
- enum ieee80211_band band;
+ enum nl80211_band band;
/* The following are bitmaps of rates; RATE_6M_MASK, etc. */
u32 supp_rates;
diff --git a/drivers/net/wireless/intel/iwlegacy/debug.c b/drivers/net/wireless/intel/iwlegacy/debug.c
index 908b9f4fe..6fc6b7ff9 100644
--- a/drivers/net/wireless/intel/iwlegacy/debug.c
+++ b/drivers/net/wireless/intel/iwlegacy/debug.c
@@ -544,7 +544,7 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
return -ENOMEM;
}
- supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+ supp_band = il_get_hw_mode(il, NL80211_BAND_2GHZ);
if (supp_band) {
channels = supp_band->channels;
@@ -571,7 +571,7 @@ il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
flags & IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
- supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+ supp_band = il_get_hw_mode(il, NL80211_BAND_5GHZ);
if (supp_band) {
channels = supp_band->channels;
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 16c4f3834..b64db47b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -88,16 +88,6 @@ config IWLWIFI_BCAST_FILTERING
If unsure, don't enable this option, as some programs might
expect incoming broadcasts for their normal operations.
-config IWLWIFI_UAPSD
- bool "enable U-APSD by default"
- depends on IWLMVM
- help
- Say Y here to enable U-APSD by default. This may cause
- interoperability problems with some APs, manifesting in lower than
- expected throughput due to those APs not enabling aggregation
-
- If unsure, say N.
-
config IWLWIFI_PCIE_RTPM
bool "Enable runtime power management mode for PCIe devices"
depends on IWLMVM && PM
@@ -144,12 +134,6 @@ config IWLWIFI_DEBUGFS
is a low-impact option that allows getting insight into the
driver's state at runtime.
-config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
- bool "Experimental uCode support"
- depends on IWLWIFI_DEBUG
- ---help---
- Enable use of experimental ucode for testing and debugging.
-
config IWLWIFI_DEVICE_TRACING
bool "iwlwifi device access tracing"
depends on EVENT_TRACING
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index 9de277c6c..b79e38734 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -158,7 +158,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
struct iwl_rxon_context *ctx);
void iwl_set_flags_for_band(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ieee80211_vif *vif);
/* uCode */
@@ -186,7 +186,7 @@ int iwl_send_statistics_request(struct iwl_priv *priv,
u8 flags, bool clear);
static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
- struct iwl_priv *priv, enum ieee80211_band band)
+ struct iwl_priv *priv, enum nl80211_band band)
{
return priv->hw->wiphy->bands[band];
}
@@ -198,7 +198,7 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan);
#endif
/* rx */
-int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band);
void iwl_setup_rx_handlers(struct iwl_priv *priv);
void iwl_chswitch_done(struct iwl_priv *priv, bool is_success);
@@ -258,7 +258,7 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv);
int __must_check iwl_scan_initiate(struct iwl_priv *priv,
struct ieee80211_vif *vif,
enum iwl_scan_type scan_type,
- enum ieee80211_band band);
+ enum nl80211_band band);
/* For faster active scanning, scan will move to the next channel if fewer than
* PLCP_QUIET_THRESH packets are heard on this channel within
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index 74c516152..f6591c83d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -335,7 +335,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
+ supp_band = iwl_get_hw_mode(priv, NL80211_BAND_2GHZ);
if (supp_band) {
channels = supp_band->channels;
@@ -358,7 +358,7 @@ static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
IEEE80211_CHAN_NO_IR ?
"passive only" : "active/passive");
}
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
+ supp_band = iwl_get_hw_mode(priv, NL80211_BAND_5GHZ);
if (supp_band) {
channels = supp_band->channels;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 1a7ead753..8148df61a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -677,7 +677,7 @@ struct iwl_priv {
struct iwl_hw_params hw_params;
- enum ieee80211_band band;
+ enum nl80211_band band;
u8 valid_contexts;
void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
@@ -722,11 +722,11 @@ struct iwl_priv {
unsigned long scan_start;
unsigned long scan_start_tsf;
void *scan_cmd;
- enum ieee80211_band scan_band;
+ enum nl80211_band scan_band;
struct cfg80211_scan_request *scan_request;
struct ieee80211_vif *scan_vif;
enum iwl_scan_type scan_type;
- u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 scan_tx_ant[NUM_NL80211_BANDS];
u8 mgmt_tx_ant;
/* max number of station keys */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index cc13c0406..f21732ec3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -420,7 +420,7 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv,
.data = { &cmd, },
};
- cmd.band = priv->band == IEEE80211_BAND_2GHZ;
+ cmd.band = priv->band == NL80211_BAND_2GHZ;
ch = ch_switch->chandef.chan->hw_value;
IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
ctx->active.channel, ch);
@@ -588,7 +588,7 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv,
hcmd.data[0] = cmd;
- cmd->band = priv->band == IEEE80211_BAND_2GHZ;
+ cmd->band = priv->band == NL80211_BAND_2GHZ;
ch = ch_switch->chandef.chan->hw_value;
IWL_DEBUG_11H(priv, "channel switch from %u to %u\n",
ctx->active.channel, ch);
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 179946926..8dda52ae3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -94,7 +94,7 @@ void iwlagn_temperature(struct iwl_priv *priv)
iwl_tt_handler(priv);
}
-int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum nl80211_band band)
{
int idx = 0;
int band_offset = 0;
@@ -105,7 +105,7 @@ int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
return idx;
/* Legacy rate format, search for match in table */
} else {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
@@ -878,7 +878,7 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
int i;
u8 ind = ant;
- if (priv->band == IEEE80211_BAND_2GHZ &&
+ if (priv->band == NL80211_BAND_2GHZ &&
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index c63ea7957..8c0719468 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -202,12 +202,12 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
- if (priv->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->nvm_data->bands[IEEE80211_BAND_2GHZ];
- if (priv->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ if (priv->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &priv->nvm_data->bands[NL80211_BAND_2GHZ];
+ if (priv->nvm_data->bands[NL80211_BAND_5GHZ].n_channels)
+ priv->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &priv->nvm_data->bands[NL80211_BAND_5GHZ];
hw->wiphy->hw_version = priv->trans->hw_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 856281279..37b32a6f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -262,7 +262,7 @@ int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
rate_flags = iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
/* In mac80211, rates for 5 GHz start at 0 */
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate += IWL_FIRST_OFDM_RATE;
else if (rate >= IWL_FIRST_CCK_RATE && rate <= IWL_LAST_CCK_RATE)
rate_flags |= RATE_MCS_CCK_MSK;
@@ -1071,7 +1071,7 @@ static void iwl_bg_restart(struct work_struct *data)
static void iwl_setup_deferred_work(struct iwl_priv *priv)
{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
+ priv->workqueue = alloc_ordered_workqueue(DRV_NAME, 0);
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
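
`create_singlethread_workqueue()` is a legacy wrapper; `alloc_ordered_workqueue(DRV_NAME, 0)` requests the same one-item-at-a-time execution through the modern workqueue API with the flags spelled out. Note the wrapper also implied `WQ_MEM_RECLAIM`, which the new call drops, presumably because this queue is not used on a memory-reclaim path. The return value still needs checking, as in this hedged kernel-style sketch:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;

static int example_setup(void)
{
        /* Ordered queue: at most one work item executes at a time. */
        wq = alloc_ordered_workqueue("example", 0);
        if (!wq)
                return -ENOMEM;   /* allocation can fail; always check */
        return 0;
}
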
@@ -1117,7 +1117,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
INIT_LIST_HEAD(&priv->calib_results);
- priv->band = IEEE80211_BAND_2GHZ;
+ priv->band = NL80211_BAND_2GHZ;
priv->plcp_delta_threshold = priv->lib->plcp_delta_threshold;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index ee7505537..b95c2d76d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -599,7 +599,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_priv *priv,
* fill "search" or "active" tx mode table.
*/
static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct iwl_scale_tbl_info *tbl,
int *rate_idx)
{
@@ -624,7 +624,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
/* legacy rate format */
if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
if (num_of_ant == 1) {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
tbl->lq_type = LQ_A;
else
tbl->lq_type = LQ_G;
@@ -802,7 +802,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
switch_to_legacy = 1;
scale_index = rs_ht_to_legacy[scale_index];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
tbl->lq_type = LQ_A;
else
tbl->lq_type = LQ_G;
@@ -821,7 +821,7 @@ static u32 rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
/* Mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
/* supp_rates has no CCK bits in A mode */
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
rate_mask = (u16)(rate_mask &
(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
else
@@ -939,7 +939,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
table = &lq_sta->lq;
tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
- if (priv->band == IEEE80211_BAND_5GHZ)
+ if (priv->band == NL80211_BAND_5GHZ)
rs_index -= IWL_FIRST_OFDM_RATE;
mac_flags = info->status.rates[0].flags;
mac_index = info->status.rates[0].idx;
@@ -952,7 +952,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
* mac80211 HT index is always zero-indexed; we need to move
* HT OFDM rates after CCK rates in 2.4 GHz band
*/
- if (priv->band == IEEE80211_BAND_2GHZ)
+ if (priv->band == NL80211_BAND_2GHZ)
mac_index += IWL_FIRST_OFDM_RATE;
}
/* Here we actually compare this rate to the latest LQ command */
@@ -2284,7 +2284,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
/* mask with station rate restriction */
if (is_legacy(tbl->lq_type)) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
/* supp_rates has no CCK bits in A mode */
rate_scale_index_msk = (u16) (rate_mask &
(lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
@@ -2721,7 +2721,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
/* Get max rate if user set max rate */
if (lq_sta) {
lq_sta->max_rate_idx = txrc->max_rate_idx;
- if ((sband->band == IEEE80211_BAND_5GHZ) &&
+ if ((sband->band == NL80211_BAND_5GHZ) &&
(lq_sta->max_rate_idx != -1))
lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
if ((lq_sta->max_rate_idx < 0) ||
@@ -2763,11 +2763,11 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
} else {
/* Check for invalid rates */
if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
- ((sband->band == IEEE80211_BAND_5GHZ) &&
+ ((sband->band == NL80211_BAND_5GHZ) &&
(rate_idx < IWL_FIRST_OFDM_RATE)))
rate_idx = rate_lowest_index(sband, sta);
/* On valid 5 GHz rate, adjust index */
- else if (sband->band == IEEE80211_BAND_5GHZ)
+ else if (sband->band == NL80211_BAND_5GHZ)
rate_idx -= IWL_FIRST_OFDM_RATE;
info->control.rates[0].flags = 0;
}
@@ -2880,7 +2880,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
/* Set last_txrate_idx to lowest rate */
lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
- if (sband->band == IEEE80211_BAND_5GHZ)
+ if (sband->band == NL80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
#ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
index c5fe44584..50c1e951d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
@@ -355,7 +355,7 @@ struct iwl_lq_sta {
u8 action_counter; /* # mode-switch actions tried */
u8 is_green;
u8 is_dup;
- enum ieee80211_band band;
+ enum nl80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
u32 supp_rates;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 52ab1e012..dfa2041cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -686,7 +686,7 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
- ieee80211_rx_napi(priv->hw, skb, priv->napi);
+ ieee80211_rx_napi(priv->hw, NULL, skb, priv->napi);
}
static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
@@ -834,7 +834,7 @@ static void iwlagn_rx_reply_rx(struct iwl_priv *priv,
/* rx_status carries information about the packet to mac80211 */
rx_status.mactime = le64_to_cpu(phy_res->timestamp);
rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
rx_status.band);
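
One non-rename change rides along in `rx.c`: mac80211's `ieee80211_rx_napi()` grew a `struct ieee80211_sta *` parameter in this kernel so drivers that already know the transmitting station can pass it; this driver does not, so it passes `NULL` and mac80211 performs the station lookup itself as before. A kernel-style sketch of the call shape:

#include <net/mac80211.h>

/* Sketch of the 4.7 signature; the (optional) station is argument two. */
static void pass_up(struct ieee80211_hw *hw, struct sk_buff *skb,
                    struct napi_struct *napi)
{
        ieee80211_rx_napi(hw, NULL /* sta not known to this driver */,
                          skb, napi);
}
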
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 2d47cb24c..b22855218 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -719,7 +719,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
struct iwl_rxon_context *ctx)
{
- enum ieee80211_band band = ch->band;
+ enum nl80211_band band = ch->band;
u16 channel = ch->hw_value;
if ((le16_to_cpu(ctx->staging.channel) == channel) &&
@@ -727,7 +727,7 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
return;
ctx->staging.channel = cpu_to_le16(channel);
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
else
ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
@@ -740,10 +740,10 @@ void iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
void iwl_set_flags_for_band(struct iwl_priv *priv,
struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ieee80211_vif *vif)
{
- if (band == IEEE80211_BAND_5GHZ) {
+ if (band == NL80211_BAND_5GHZ) {
ctx->staging.flags &=
~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
| RXON_FLG_CCK_MSK);
@@ -1476,7 +1476,7 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
iwlagn_set_rxon_chain(priv, ctx);
- if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
+ if (bss_conf->use_cts_prot && (priv->band != NL80211_BAND_5GHZ))
ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
else
ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index 81a2ddbe9..d01766f16 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -312,7 +312,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
scan_notif->tsf_high, scan_notif->status);
IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
- (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+ (priv->scan_band == NL80211_BAND_2GHZ) ? "2.4" : "5.2",
jiffies_to_msecs(jiffies - priv->scan_start));
/*
@@ -362,9 +362,9 @@ void iwl_setup_rx_scan_handlers(struct iwl_priv *priv)
}
static u16 iwl_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band, u8 n_probes)
+ enum nl80211_band band, u8 n_probes)
{
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
return IWL_ACTIVE_DWELL_TIME_52 +
IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
else
@@ -431,9 +431,9 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
}
static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
- u16 passive = (band == IEEE80211_BAND_2GHZ) ?
+ u16 passive = (band == NL80211_BAND_2GHZ) ?
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
@@ -442,7 +442,7 @@ static u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
/* Return valid, unused, channel for a passive scan to reset the RF */
static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
struct ieee80211_supported_band *sband = priv->hw->wiphy->bands[band];
struct iwl_rxon_context *ctx;
@@ -470,7 +470,7 @@ static u8 iwl_get_single_channel_number(struct iwl_priv *priv,
static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
struct ieee80211_vif *vif,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct iwl_scan_channel *scan_ch)
{
const struct ieee80211_supported_band *sband;
@@ -492,7 +492,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
cpu_to_le16(IWL_RADIO_RESET_DWELL_TIME);
/* Set txpower levels to defaults */
scan_ch->dsp_atten = 110;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -505,7 +505,7 @@ static int iwl_get_channel_for_reset_scan(struct iwl_priv *priv,
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
struct ieee80211_vif *vif,
- enum ieee80211_band band,
+ enum nl80211_band band,
u8 is_active, u8 n_probes,
struct iwl_scan_channel *scan_ch)
{
@@ -553,7 +553,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
* power level:
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
*/
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -636,7 +636,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
u32 rate_flags = 0;
u16 cmd_len = 0;
u16 rx_chain = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
u8 n_probes = 0;
u8 rx_ant = priv->nvm_data->valid_rx_ant;
u8 rate;
@@ -750,7 +750,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
chan_mod = le32_to_cpu(
priv->contexts[IWL_RXON_CTX_BSS].active.flags &
@@ -771,7 +771,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
priv->lib->bt_params->advanced_bt_coexist)
scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
rate = IWL_RATE_6M_PLCP;
break;
default:
@@ -809,7 +809,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
band = priv->scan_band;
- if (band == IEEE80211_BAND_2GHZ &&
+ if (band == NL80211_BAND_2GHZ &&
priv->lib->bt_params &&
priv->lib->bt_params->advanced_bt_coexist) {
/* transmit 2.4 GHz probes only on first antenna */
@@ -925,16 +925,16 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
void iwl_init_scan_params(struct iwl_priv *priv)
{
u8 ant_idx = fls(priv->nvm_data->valid_tx_ant) - 1;
- if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
- if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+ if (!priv->scan_tx_ant[NL80211_BAND_5GHZ])
+ priv->scan_tx_ant[NL80211_BAND_5GHZ] = ant_idx;
+ if (!priv->scan_tx_ant[NL80211_BAND_2GHZ])
+ priv->scan_tx_ant[NL80211_BAND_2GHZ] = ant_idx;
}
int __must_check iwl_scan_initiate(struct iwl_priv *priv,
struct ieee80211_vif *vif,
enum iwl_scan_type scan_type,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
int ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index 8e9768a55..de6ec9b7a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -579,7 +579,7 @@ static void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
/* Set up the rate scaling to start at selected rate, fall back
* all the way down to 1M in IEEE order, and then spin on 1M */
- if (priv->band == IEEE80211_BAND_5GHZ)
+ if (priv->band == NL80211_BAND_5GHZ)
r = IWL_RATE_6M_INDEX;
else if (ctx && ctx->vif && ctx->vif->p2p)
r = IWL_RATE_6M_INDEX;
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 59e2001c3..4b97371c3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -81,7 +81,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_flags |= TX_CMD_FLG_TSF_MSK;
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
- else if (info->band == IEEE80211_BAND_2GHZ &&
+ else if (info->band == NL80211_BAND_2GHZ &&
priv->lib->bt_params &&
priv->lib->bt_params->advanced_bt_coexist &&
(ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
@@ -177,7 +177,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
rate_idx = rate_lowest_index(
&priv->nvm_data->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
/* Get PLCP rate for tx_cmd->rate_n_flags */
rate_plcp = iwl_rates[rate_idx].plcp;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
index 2cfac764a..9bdac0306 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-1000.c
@@ -34,10 +34,6 @@
#define IWL1000_UCODE_API_MAX 5
#define IWL100_UCODE_API_MAX 5
-/* Oldest version we won't warn about */
-#define IWL1000_UCODE_API_OK 5
-#define IWL100_UCODE_API_OK 5
-
/* Lowest firmware API version supported */
#define IWL1000_UCODE_API_MIN 1
#define IWL100_UCODE_API_MIN 5
@@ -56,7 +52,7 @@
static const struct iwl_base_params iwl1000_base_params = {
.num_of_queues = IWLAGN_NUM_QUEUES,
.eeprom_size = OTP_LOW_IMAGE_SIZE,
- .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+ .pll_cfg = true,
.max_ll_items = OTP_MAX_LL_ITEMS_1000,
.shadow_ram_support = false,
.led_compensation = 51,
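
The device-config hunks here and in the sibling `iwl-2000/5000/6000` files make two tidy-ups: the per-device `*_UCODE_API_OK` thresholds (per the deleted comment, the oldest firmware API version that would not warn) are dropped together with every `.ucode_api_ok` initializer below, and the raw `.pll_cfg_val` register value shrinks to a `.pll_cfg` boolean, the actual value now being supplied elsewhere. A sketch of the boolean-flag style with invented names:

#include <stdbool.h>

/* Invented miniature of the change: store the decision, not the value. */
struct base_params {
        bool pll_cfg;        /* was: u32 pll_cfg_val holding a raw CSR value */
};

static const struct base_params example_params = {
        .pll_cfg = true,     /* the code that applies it knows the value */
};
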
@@ -68,7 +64,7 @@ static const struct iwl_base_params iwl1000_base_params = {
static const struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
};
static const struct iwl_eeprom_params iwl1000_eeprom_params = {
@@ -86,7 +82,6 @@ static const struct iwl_eeprom_params iwl1000_eeprom_params = {
#define IWL_DEVICE_1000 \
.fw_name_pre = IWL1000_FW_PRE, \
.ucode_api_max = IWL1000_UCODE_API_MAX, \
- .ucode_api_ok = IWL1000_UCODE_API_OK, \
.ucode_api_min = IWL1000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_1000, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -112,7 +107,6 @@ const struct iwl_cfg iwl1000_bg_cfg = {
#define IWL_DEVICE_100 \
.fw_name_pre = IWL100_FW_PRE, \
.ucode_api_max = IWL100_UCODE_API_MAX, \
- .ucode_api_ok = IWL100_UCODE_API_OK, \
.ucode_api_min = IWL100_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_100, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
index f128e40cb..761cdccaa 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-2000.c
@@ -36,12 +36,6 @@
#define IWL105_UCODE_API_MAX 6
#define IWL135_UCODE_API_MAX 6
-/* Oldest version we won't warn about */
-#define IWL2030_UCODE_API_OK 6
-#define IWL2000_UCODE_API_OK 6
-#define IWL105_UCODE_API_OK 6
-#define IWL135_UCODE_API_OK 6
-
/* Lowest firmware API version supported */
#define IWL2030_UCODE_API_MIN 5
#define IWL2000_UCODE_API_MIN 5
@@ -68,7 +62,6 @@
static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -82,7 +75,6 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
@@ -95,7 +87,7 @@ static const struct iwl_base_params iwl2030_base_params = {
static const struct iwl_ht_params iwl2000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ),
};
static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
@@ -114,7 +106,6 @@ static const struct iwl_eeprom_params iwl20x0_eeprom_params = {
#define IWL_DEVICE_2000 \
.fw_name_pre = IWL2000_FW_PRE, \
.ucode_api_max = IWL2000_UCODE_API_MAX, \
- .ucode_api_ok = IWL2000_UCODE_API_OK, \
.ucode_api_min = IWL2000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_2000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -142,7 +133,6 @@ const struct iwl_cfg iwl2000_2bgn_d_cfg = {
#define IWL_DEVICE_2030 \
.fw_name_pre = IWL2030_FW_PRE, \
.ucode_api_max = IWL2030_UCODE_API_MAX, \
- .ucode_api_ok = IWL2030_UCODE_API_OK, \
.ucode_api_min = IWL2030_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_2030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -163,7 +153,6 @@ const struct iwl_cfg iwl2030_2bgn_cfg = {
#define IWL_DEVICE_105 \
.fw_name_pre = IWL105_FW_PRE, \
.ucode_api_max = IWL105_UCODE_API_MAX, \
- .ucode_api_ok = IWL105_UCODE_API_OK, \
.ucode_api_min = IWL105_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_105, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -191,7 +180,6 @@ const struct iwl_cfg iwl105_bgn_d_cfg = {
#define IWL_DEVICE_135 \
.fw_name_pre = IWL135_FW_PRE, \
.ucode_api_max = IWL135_UCODE_API_MAX, \
- .ucode_api_ok = IWL135_UCODE_API_OK, \
.ucode_api_min = IWL135_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_135, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
index cae82e1a1..230f324ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-5000.c
@@ -34,10 +34,6 @@
#define IWL5000_UCODE_API_MAX 5
#define IWL5150_UCODE_API_MAX 2
-/* Oldest version we won't warn about */
-#define IWL5000_UCODE_API_OK 5
-#define IWL5150_UCODE_API_OK 2
-
/* Lowest firmware API version supported */
#define IWL5000_UCODE_API_MIN 1
#define IWL5150_UCODE_API_MIN 1
@@ -57,7 +53,7 @@
static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
+ .pll_cfg = true,
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
.max_event_log_size = 512,
@@ -66,7 +62,7 @@ static const struct iwl_base_params iwl5000_base_params = {
static const struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
static const struct iwl_eeprom_params iwl5000_eeprom_params = {
@@ -84,7 +80,6 @@ static const struct iwl_eeprom_params iwl5000_eeprom_params = {
#define IWL_DEVICE_5000 \
.fw_name_pre = IWL5000_FW_PRE, \
.ucode_api_max = IWL5000_UCODE_API_MAX, \
- .ucode_api_ok = IWL5000_UCODE_API_OK, \
.ucode_api_min = IWL5000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_5000, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
@@ -132,7 +127,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
.name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
.fw_name_pre = IWL5000_FW_PRE,
.ucode_api_max = IWL5000_UCODE_API_MAX,
- .ucode_api_ok = IWL5000_UCODE_API_OK,
.ucode_api_min = IWL5000_UCODE_API_MIN,
.device_family = IWL_DEVICE_FAMILY_5000,
.max_inst_size = IWLAGN_RTC_INST_SIZE,
@@ -149,7 +143,6 @@ const struct iwl_cfg iwl5350_agn_cfg = {
#define IWL_DEVICE_5150 \
.fw_name_pre = IWL5150_FW_PRE, \
.ucode_api_max = IWL5150_UCODE_API_MAX, \
- .ucode_api_ok = IWL5150_UCODE_API_OK, \
.ucode_api_min = IWL5150_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_5150, \
.max_inst_size = IWLAGN_RTC_INST_SIZE, \
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
index b8b2a0e08..fa86e5e7d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-6000.c
@@ -36,13 +36,6 @@
#define IWL6000G2_UCODE_API_MAX 6
#define IWL6035_UCODE_API_MAX 6
-/* Oldest version we won't warn about */
-#define IWL6000_UCODE_API_OK 4
-#define IWL6000G2_UCODE_API_OK 5
-#define IWL6050_UCODE_API_OK 5
-#define IWL6000G2B_UCODE_API_OK 6
-#define IWL6035_UCODE_API_OK 6
-
/* Lowest firmware API version supported */
#define IWL6000_UCODE_API_MIN 4
#define IWL6050_UCODE_API_MIN 4
@@ -78,7 +71,6 @@
static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -91,7 +83,6 @@ static const struct iwl_base_params iwl6000_base_params = {
static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
@@ -104,7 +95,6 @@ static const struct iwl_base_params iwl6050_base_params = {
static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
- .pll_cfg_val = 0,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
@@ -117,7 +107,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
static const struct iwl_ht_params iwl6000_ht_params = {
.ht_greenfield_support = true,
.use_rts_for_aggregation = true, /* use rts/cts protection */
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
static const struct iwl_eeprom_params iwl6000_eeprom_params = {
@@ -136,7 +126,6 @@ static const struct iwl_eeprom_params iwl6000_eeprom_params = {
#define IWL_DEVICE_6005 \
.fw_name_pre = IWL6005_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
- .ucode_api_ok = IWL6000G2_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6005, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -191,7 +180,6 @@ const struct iwl_cfg iwl6005_2agn_mow2_cfg = {
#define IWL_DEVICE_6030 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6000G2_UCODE_API_MAX, \
- .ucode_api_ok = IWL6000G2B_UCODE_API_OK, \
.ucode_api_min = IWL6000G2_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -228,7 +216,6 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
#define IWL_DEVICE_6035 \
.fw_name_pre = IWL6030_FW_PRE, \
.ucode_api_max = IWL6035_UCODE_API_MAX, \
- .ucode_api_ok = IWL6035_UCODE_API_OK, \
.ucode_api_min = IWL6035_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6030, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -282,7 +269,6 @@ const struct iwl_cfg iwl130_bg_cfg = {
#define IWL_DEVICE_6000i \
.fw_name_pre = IWL6000_FW_PRE, \
.ucode_api_max = IWL6000_UCODE_API_MAX, \
- .ucode_api_ok = IWL6000_UCODE_API_OK, \
.ucode_api_min = IWL6000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_6000i, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -370,7 +356,6 @@ const struct iwl_cfg iwl6000_3agn_cfg = {
.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
- .ucode_api_ok = IWL6000_UCODE_API_OK,
.ucode_api_min = IWL6000_UCODE_API_MIN,
.device_family = IWL_DEVICE_FAMILY_6000,
.max_inst_size = IWL60_RTC_INST_SIZE,
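With the OK tier gone, the loader's behaviour reduces to a countdown: request the api_max image first, then step down one API version per failed request until api_min. A hedged sketch with a hypothetical helper (the real loop lives in iwl_request_firmware(), shown later in this patch):

static int next_fw_index(const struct iwl_cfg *cfg, int cur, bool first)
{
	int idx = first ? cfg->ucode_api_max : cur - 1;

	if (idx < (int)cfg->ucode_api_min)
		return -ENOENT;	/* no loadable image left */
	return idx;
}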
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
index 786a919fa..8d164d896 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c
@@ -76,16 +76,10 @@
#define IWL7265D_UCODE_API_MAX 21
#define IWL3168_UCODE_API_MAX 21
-/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 13
-#define IWL7265_UCODE_API_OK 13
-#define IWL7265D_UCODE_API_OK 13
-#define IWL3168_UCODE_API_OK 20
-
/* Lowest firmware API version supported */
-#define IWL7260_UCODE_API_MIN 13
-#define IWL7265_UCODE_API_MIN 13
-#define IWL7265D_UCODE_API_MIN 13
+#define IWL7260_UCODE_API_MIN 16
+#define IWL7265_UCODE_API_MIN 16
+#define IWL7265D_UCODE_API_MIN 16
#define IWL3168_UCODE_API_MIN 20
/* NVM versions */
@@ -128,7 +122,6 @@
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = 31,
- .pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -162,7 +155,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
static const struct iwl_ht_params iwl7000_ht_params = {
.stbc = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
#define IWL_DEVICE_7000_COMMON \
@@ -179,25 +172,21 @@ static const struct iwl_ht_params iwl7000_ht_params = {
#define IWL_DEVICE_7000 \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7260_UCODE_API_MAX, \
- .ucode_api_ok = IWL7260_UCODE_API_OK, \
.ucode_api_min = IWL7260_UCODE_API_MIN
#define IWL_DEVICE_7005 \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7265_UCODE_API_MAX, \
- .ucode_api_ok = IWL7265_UCODE_API_OK, \
.ucode_api_min = IWL7265_UCODE_API_MIN
#define IWL_DEVICE_3008 \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL3168_UCODE_API_MAX, \
- .ucode_api_ok = IWL3168_UCODE_API_OK, \
.ucode_api_min = IWL3168_UCODE_API_MIN
#define IWL_DEVICE_7005D \
IWL_DEVICE_7000_COMMON, \
.ucode_api_max = IWL7265D_UCODE_API_MAX, \
- .ucode_api_ok = IWL7265D_UCODE_API_OK, \
.ucode_api_min = IWL7265D_UCODE_API_MIN
const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -297,7 +286,7 @@ static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
static const struct iwl_ht_params iwl7265_ht_params = {
.stbc = true,
.ldpc = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
const struct iwl_cfg iwl3165_2ac_cfg = {
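Concretely for 7265D: api_min rises from 13 to 16 while api_max stays at 21, so the countdown now tries only -21 through -16, and an API-13 image that was previously acceptable is rejected outright:

/* 7265D firmware window after this patch */
#define IWL7265D_UCODE_API_MAX	21
#define IWL7265D_UCODE_API_MIN	16
/* requested in order: 21, 20, 19, 18, 17, 16 — then give up */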
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index 16846cae3..0b4684234 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -73,12 +73,8 @@
#define IWL8000_UCODE_API_MAX 21
#define IWL8265_UCODE_API_MAX 21
-/* Oldest version we won't warn about */
-#define IWL8000_UCODE_API_OK 13
-#define IWL8265_UCODE_API_OK 20
-
/* Lowest firmware API version supported */
-#define IWL8000_UCODE_API_MIN 13
+#define IWL8000_UCODE_API_MIN 16
#define IWL8265_UCODE_API_MIN 20
/* NVM versions */
@@ -116,7 +112,6 @@
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
- .pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -128,7 +123,7 @@ static const struct iwl_base_params iwl8000_base_params = {
static const struct iwl_ht_params iwl8000_ht_params = {
.stbc = true,
.ldpc = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
static const struct iwl_tt_params iwl8000_tt_params = {
@@ -175,19 +170,16 @@ static const struct iwl_tt_params iwl8000_tt_params = {
#define IWL_DEVICE_8000 \
IWL_DEVICE_8000_COMMON, \
.ucode_api_max = IWL8000_UCODE_API_MAX, \
- .ucode_api_ok = IWL8000_UCODE_API_OK, \
.ucode_api_min = IWL8000_UCODE_API_MIN \
#define IWL_DEVICE_8260 \
IWL_DEVICE_8000_COMMON, \
.ucode_api_max = IWL8000_UCODE_API_MAX, \
- .ucode_api_ok = IWL8000_UCODE_API_OK, \
.ucode_api_min = IWL8000_UCODE_API_MIN \
#define IWL_DEVICE_8265 \
IWL_DEVICE_8000_COMMON, \
.ucode_api_max = IWL8265_UCODE_API_MAX, \
- .ucode_api_ok = IWL8265_UCODE_API_OK, \
.ucode_api_min = IWL8265_UCODE_API_MIN \
const struct iwl_cfg iwl8260_2n_cfg = {
@@ -244,6 +236,20 @@ const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
};
+const struct iwl_cfg iwl8265_2ac_sdio_cfg = {
+ .name = "Intel(R) Dual Band Wireless-AC 8265",
+ .fw_name_pre = IWL8265_FW_PRE,
+ IWL_DEVICE_8265,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+ .max_rx_agg_size = MAX_RX_AGG_SIZE_8260_SDIO,
+ .max_tx_agg_size = MAX_TX_AGG_SIZE_8260_SDIO,
+ .disable_dummy_notification = true,
+ .max_ht_ampdu_exponent = MAX_HT_AMPDU_EXPONENT_8260_SDIO,
+ .max_vht_ampdu_exponent = MAX_VHT_AMPDU_EXPONENT_8260_SDIO,
+};
+
const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 4165",
.fw_name_pre = IWL8000_FW_PRE,
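Each cfg above mixes a shared IWL_DEVICE_* macro with per-device designated initializers; the macro simply expands to more initializers, so ordering inside the braces is free. A tiny standalone illustration of the idiom (demo names, not driver code):

#include <stdio.h>

#define DEVICE_COMMON .queues = 31, .shadow_ram = 1

struct cfg {
	const char *name;
	int queues;
	int shadow_ram;
};

static const struct cfg demo = {
	.name = "demo",
	DEVICE_COMMON,	/* expands to more designated initializers */
};

int main(void)
{
	printf("%s: %d queues\n", demo.name, demo.queues);
	return 0;
}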
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
index 7b34387d7..19569fff2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-9000.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -18,7 +18,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015-2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -57,11 +57,8 @@
/* Highest firmware API version supported */
#define IWL9000_UCODE_API_MAX 21
-/* Oldest version we won't warn about */
-#define IWL9000_UCODE_API_OK 13
-
/* Lowest firmware API version supported */
-#define IWL9000_UCODE_API_MIN 13
+#define IWL9000_UCODE_API_MIN 16
/* NVM versions */
#define IWL9000_NVM_VERSION 0x0a1d
@@ -76,15 +73,20 @@
#define IWL9000_SMEM_LEN 0x68000
#define IWL9000_FW_PRE "/*(DEBLOBBED)*/"
+#define IWL9260_FW_PRE "/*(DEBLOBBED)*/"
+#define IWL9260LC_FW_PRE "/*(DEBLOBBED)*/"
#define IWL9000_MODULE_FIRMWARE(api) \
IWL9000_FW_PRE /*(DEBLOBBED)*/
+#define IWL9260_MODULE_FIRMWARE(api) \
+ IWL9260_FW_PRE /*(DEBLOBBED)*/
+#define IWL9260LC_MODULE_FIRMWARE(api) \
+ IWL9260LC_FW_PRE /*(DEBLOBBED)*/
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
.num_of_queues = 31,
- .pll_cfg_val = 0,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
@@ -96,7 +98,7 @@ static const struct iwl_base_params iwl9000_base_params = {
static const struct iwl_ht_params iwl9000_ht_params = {
.stbc = true,
.ldpc = true,
- .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+ .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
static const struct iwl_tt_params iwl9000_tt_params = {
@@ -122,7 +124,6 @@ static const struct iwl_tt_params iwl9000_tt_params = {
#define IWL_DEVICE_9000 \
.ucode_api_max = IWL9000_UCODE_API_MAX, \
- .ucode_api_ok = IWL9000_UCODE_API_OK, \
.ucode_api_min = IWL9000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_8000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
@@ -137,15 +138,31 @@ static const struct iwl_tt_params iwl9000_tt_params = {
.dccm2_len = IWL9000_DCCM2_LEN, \
.smem_offset = IWL9000_SMEM_OFFSET, \
.smem_len = IWL9000_SMEM_LEN, \
+ .features = IWL_TX_CSUM_NETIF_FLAGS | NETIF_F_RXCSUM, \
.thermal_params = &iwl9000_tt_params, \
.apmg_not_supported = true, \
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
- .mac_addr_from_csr = true
+ .mac_addr_from_csr = true, \
+ .rf_id = true
const struct iwl_cfg iwl9260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9260",
- .fw_name_pre = IWL9000_FW_PRE,
+ .fw_name_pre = IWL9260_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+/*
+ * TODO: the struct below is for internal testing only; it should be
+ * removed by EO 2016.
+ */
+const struct iwl_cfg iwl9260lc_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 9260",
+ .fw_name_pre = IWL9260LC_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 3e4d346be..4a0af7de8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright (C) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -131,6 +133,8 @@ enum iwl_led_mode {
#define IWL_MAX_WD_TIMEOUT 120000
#define IWL_DEFAULT_MAX_TX_POWER 22
+#define IWL_TX_CSUM_NETIF_FLAGS (NETIF_F_IPV6_CSUM | NETIF_F_IP_CSUM |\
+ NETIF_F_TSO | NETIF_F_TSO6)
/* Antenna presence definitions */
#define ANT_NONE 0x0
@@ -163,34 +167,36 @@ static inline u8 num_of_ant(u8 mask)
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
*/
struct iwl_base_params {
- int eeprom_size;
- int num_of_queues; /* def: HW dependent */
- /* for iwl_pcie_apm_init() */
- u32 pll_cfg_val;
-
- const u16 max_ll_items;
- const bool shadow_ram_support;
- u16 led_compensation;
unsigned int wd_timeout;
- u32 max_event_log_size;
- const bool shadow_reg_enable;
- const bool pcie_l1_allowed;
- const bool apmg_wake_up_wa;
- const bool scd_chain_ext_wa;
+
+ u16 eeprom_size;
+ u16 max_event_log_size;
+
+ u8 pll_cfg:1, /* for iwl_pcie_apm_init() */
+ shadow_ram_support:1,
+ shadow_reg_enable:1,
+ pcie_l1_allowed:1,
+ apmg_wake_up_wa:1,
+ scd_chain_ext_wa:1;
+
+ u8 num_of_queues; /* def: HW dependent */
+
+ u8 max_ll_items;
+ u8 led_compensation;
};
/*
* @stbc: support Tx STBC and 1*SS Rx STBC
* @ldpc: support Tx/Rx with LDPC
* @use_rts_for_aggregation: use rts/cts protection for HT traffic
- * @ht40_bands: bitmap of bands (using %IEEE80211_BAND_*) that support HT40
+ * @ht40_bands: bitmap of bands (using %NL80211_BAND_*) that support HT40
*/
struct iwl_ht_params {
enum ieee80211_smps_mode smps_mode;
- const bool ht_greenfield_support; /* if used set to true */
- const bool stbc;
- const bool ldpc;
- bool use_rts_for_aggregation;
+ u8 ht_greenfield_support:1,
+ stbc:1,
+ ldpc:1,
+ use_rts_for_aggregation:1;
u8 ht40_bands;
};
@@ -231,10 +237,10 @@ struct iwl_tt_params {
u32 tx_protection_entry;
u32 tx_protection_exit;
struct iwl_tt_tx_backoff tx_backoff[TT_TX_BACKOFF_SIZE];
- bool support_ct_kill;
- bool support_dynamic_smps;
- bool support_tx_protection;
- bool support_tx_backoff;
+ u8 support_ct_kill:1,
+ support_dynamic_smps:1,
+ support_tx_protection:1,
+ support_tx_backoff:1;
};
/*
@@ -277,8 +283,6 @@ struct iwl_pwr_tx_backoff {
* (.ucode) will be added to filename before loading from disk. The
* filename is constructed as fw_name_pre<api>.ucode.
* @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- * without a warning, for use in transitions
* @ucode_api_min: Lowest version of uCode API supported by driver.
* @max_inst_size: The maximal length of the fw inst section
* @max_data_size: The maximal length of the fw data section
@@ -314,6 +318,7 @@ struct iwl_pwr_tx_backoff {
* @smem_len: the length of SMEM
* @mq_rx_supported: multi-queue rx support
* @vht_mu_mimo_supported: VHT MU-MIMO support
+ * @rf_id: need to read rf_id to determine the firmware image
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -323,51 +328,51 @@ struct iwl_cfg {
/* params specific to an individual device within a device family */
const char *name;
const char *fw_name_pre;
- const unsigned int ucode_api_max;
- const unsigned int ucode_api_ok;
- const unsigned int ucode_api_min;
- const enum iwl_device_family device_family;
- const u32 max_data_size;
- const u32 max_inst_size;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u8 non_shared_ant;
- bool bt_shared_single_ant;
- u16 nvm_ver;
- u16 nvm_calib_ver;
/* params not likely to change within a device family */
const struct iwl_base_params *base_params;
/* params likely to change within a device family */
const struct iwl_ht_params *ht_params;
const struct iwl_eeprom_params *eeprom_params;
- enum iwl_led_mode led_mode;
- const bool rx_with_siso_diversity;
- const bool internal_wimax_coex;
- const bool host_interrupt_operation_mode;
- bool high_temp;
- u8 nvm_hw_section_num;
- bool mac_addr_from_csr;
- bool lp_xtal_workaround;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
- bool no_power_up_nic_in_init;
const char *default_nvm_file_B_step;
const char *default_nvm_file_C_step;
- netdev_features_t features;
- unsigned int max_rx_agg_size;
- bool disable_dummy_notification;
- unsigned int max_tx_agg_size;
- unsigned int max_ht_ampdu_exponent;
- unsigned int max_vht_ampdu_exponent;
- const u32 dccm_offset;
- const u32 dccm_len;
- const u32 dccm2_offset;
- const u32 dccm2_len;
- const u32 smem_offset;
- const u32 smem_len;
const struct iwl_tt_params *thermal_params;
- bool apmg_not_supported;
- bool mq_rx_supported;
- bool vht_mu_mimo_supported;
+ enum iwl_device_family device_family;
+ enum iwl_led_mode led_mode;
+ u32 max_data_size;
+ u32 max_inst_size;
+ netdev_features_t features;
+ u32 dccm_offset;
+ u32 dccm_len;
+ u32 dccm2_offset;
+ u32 dccm2_len;
+ u32 smem_offset;
+ u32 smem_len;
+ u16 nvm_ver;
+ u16 nvm_calib_ver;
+ u16 rx_with_siso_diversity:1,
+ bt_shared_single_ant:1,
+ internal_wimax_coex:1,
+ host_interrupt_operation_mode:1,
+ high_temp:1,
+ mac_addr_from_csr:1,
+ lp_xtal_workaround:1,
+ no_power_up_nic_in_init:1,
+ disable_dummy_notification:1,
+ apmg_not_supported:1,
+ mq_rx_supported:1,
+ vht_mu_mimo_supported:1,
+ rf_id:1;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u8 non_shared_ant;
+ u8 nvm_hw_section_num;
+ u8 max_rx_agg_size;
+ u8 max_tx_agg_size;
+ u8 max_ht_ampdu_exponent;
+ u8 max_vht_ampdu_exponent;
+ u8 ucode_api_max;
+ u8 ucode_api_min;
};
/*
@@ -438,8 +443,10 @@ extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
+extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
extern const struct iwl_cfg iwl4165_2ac_sdio_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
+extern const struct iwl_cfg iwl9260lc_2ac_cfg;
extern const struct iwl_cfg iwl5165_2ac_cfg;
#endif /* CONFIG_IWLMVM */
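Converting the scattered bools into 1-bit fields and ordering members large-to-small is what shrinks these static tables. A self-contained illustration of the effect (illustrative structs; exact sizes are ABI-dependent):

#include <stdint.h>
#include <stdio.h>

struct loose {		/* one byte per bool, plus padding */
	int a;
	_Bool f1, f2, f3, f4;
	uint16_t v;
};

struct dense {		/* the four flags share one byte */
	int a;
	uint16_t v;
	uint8_t f1:1, f2:1, f3:1, f4:1;
};

int main(void)
{
	printf("%zu vs %zu bytes\n", sizeof(struct loose),
	       sizeof(struct dense));	/* typically 12 vs 8 */
	return 0;
}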
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index b978f6cae..b52913448 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -108,6 +108,17 @@
#define CSR_HW_REV (CSR_BASE+0x028)
/*
+ * RF ID revision info
+ * Bit fields:
+ * 31:24: Reserved (set to 0x0)
+ * 23:12: Type
+ * 11:8: Step (A - 0x0, B - 0x1, etc)
+ * 7:4: Dash
+ * 3:0: Flavor
+ */
+#define CSR_HW_RF_ID (CSR_BASE+0x09c)
+
+/*
* EEPROM and OTP (one-time-programmable) memory reads
*
* NOTE: Device must be awake, initialized via apm_ops.init(),
@@ -333,6 +344,10 @@ enum {
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
+/* RF_ID value */
+#define CSR_HW_RF_ID_TYPE_JF (0x00105000)
+#define CSR_HW_RF_ID_TYPE_LC (0x00101000)
+
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
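Given the CSR_HW_RF_ID layout documented above, each field comes out with a shift and mask. A hedged sketch (macro names are ours, not the driver's):

#define RF_ID_FLAVOR(r)	((r) & 0xf)		/* bits 3:0  */
#define RF_ID_DASH(r)	(((r) >> 4) & 0xf)	/* bits 7:4  */
#define RF_ID_STEP(r)	(((r) >> 8) & 0xf)	/* bits 11:8 */
#define RF_ID_TYPE(r)	(((r) >> 12) & 0xfff)	/* bits 23:12 */

/* e.g. CSR_HW_RF_ID_TYPE_JF (0x00105000): type 0x105, step A (0x0) */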
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 7ce381ba3..c695125d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -117,7 +117,7 @@ struct iwl_drv {
const struct iwl_cfg *cfg;
int fw_index; /* firmware we're trying to load */
- char firmware_name[32]; /* name of firmware file to load */
+ char firmware_name[64]; /* name of firmware file to load */
struct completion request_firmware_complete;
@@ -179,6 +179,8 @@ static void iwl_dealloc_ucode(struct iwl_drv *drv)
kfree(drv->fw.dbg_conf_tlv[i]);
for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_trigger_tlv); i++)
kfree(drv->fw.dbg_trigger_tlv[i]);
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++)
+ kfree(drv->fw.dbg_mem_tlv[i]);
for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
iwl_free_fw_img(drv, drv->fw.img + i);
@@ -209,20 +211,12 @@ static int iwl_alloc_fw_desc(struct iwl_drv *drv, struct fw_desc *desc,
static void iwl_req_fw_callback(const struct firmware *ucode_raw,
void *context);
-#define UCODE_EXPERIMENTAL_INDEX 100
-#define UCODE_EXPERIMENTAL_TAG "exp"
-
static int iwl_request_firmware(struct iwl_drv *drv, bool first)
{
const char *name_pre = drv->cfg->fw_name_pre;
char tag[8];
if (first) {
-#ifdef CONFIG_IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
- drv->fw_index = UCODE_EXPERIMENTAL_INDEX;
- strcpy(tag, UCODE_EXPERIMENTAL_TAG);
- } else if (drv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
-#endif
drv->fw_index = drv->cfg->ucode_api_max;
sprintf(tag, "%d", drv->fw_index);
} else {
@@ -238,9 +232,7 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
snprintf(drv->firmware_name, sizeof(drv->firmware_name), "/*(DEBLOBBED)*/",
name_pre, tag);
- IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
- (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? "EXPERIMENTAL " : "",
+ IWL_DEBUG_INFO(drv, "attempting to load firmware '%s'\n",
drv->firmware_name);
return reject_firmware_nowait(THIS_MODULE, 1, drv->firmware_name,
@@ -284,6 +276,7 @@ struct iwl_firmware_pieces {
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
+ struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
};
/*
@@ -538,9 +531,7 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
}
if (build)
- sprintf(buildstr, " build %u%s", build,
- (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? " (EXP)" : "");
+ sprintf(buildstr, " build %u", build);
else
buildstr[0] = '\0';
@@ -624,9 +615,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
build = le32_to_cpu(ucode->build);
if (build)
- sprintf(buildstr, " build %u%s", build,
- (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
- ? " (EXP)" : "");
+ sprintf(buildstr, " build %u", build);
else
buildstr[0] = '\0';
@@ -1028,6 +1017,37 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
gscan_capa = true;
break;
+ case IWL_UCODE_TLV_FW_MEM_SEG: {
+ struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
+ (void *)tlv_data;
+ u32 type;
+
+ if (tlv_len != sizeof(*dbg_mem))
+ goto invalid_tlv_len;
+
+ type = le32_to_cpu(dbg_mem->data_type);
+ drv->fw.dbg_dynamic_mem = true;
+
+ if (type >= ARRAY_SIZE(drv->fw.dbg_mem_tlv)) {
+ IWL_ERR(drv,
+ "Skip unknown dbg mem segment: %u\n",
+ type);
+ break;
+ }
+
+ if (pieces->dbg_mem_tlv[type]) {
+ IWL_ERR(drv,
+ "Ignore duplicate mem segment: %u\n",
+ type);
+ break;
+ }
+
+ IWL_DEBUG_INFO(drv, "Found debug memory segment: %u\n",
+ type);
+
+ pieces->dbg_mem_tlv[type] = dbg_mem;
+ break;
+ }
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
@@ -1193,7 +1213,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
int err;
struct iwl_firmware_pieces *pieces;
const unsigned int api_max = drv->cfg->ucode_api_max;
- unsigned int api_ok = drv->cfg->ucode_api_ok;
const unsigned int api_min = drv->cfg->ucode_api_min;
size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
u32 api_ver;
@@ -1206,20 +1225,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
- if (!api_ok)
- api_ok = api_max;
-
pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
if (!pieces)
return;
- if (!ucode_raw) {
- if (drv->fw_index <= api_ok)
- IWL_ERR(drv,
- "request for firmware file '%s' failed.\n",
- drv->firmware_name);
+ if (!ucode_raw)
goto try_again;
- }
IWL_DEBUG_INFO(drv, "Loaded firmware file '%s' (%zd bytes).\n",
drv->firmware_name, ucode_raw->size);
@@ -1252,28 +1263,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
* firmware filename ... but we don't check for that and only rely
* on the API version read from firmware header from here on forward
*/
- /* no api version check required for experimental uCode */
- if (drv->fw_index != UCODE_EXPERIMENTAL_INDEX) {
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(drv,
- "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- goto try_again;
- }
-
- if (api_ver < api_ok) {
- if (api_ok != api_max)
- IWL_ERR(drv, "Firmware has old API version, "
- "expected v%u through v%u, got v%u.\n",
- api_ok, api_max, api_ver);
- else
- IWL_ERR(drv, "Firmware has old API version, "
- "expected v%u, got v%u.\n",
- api_max, api_ver);
- IWL_ERR(drv, "New firmware can be obtained from "
- "http://www.intellinuxwireless.org/.\n");
- }
+ if (api_ver < api_min || api_ver > api_max) {
+ IWL_ERR(drv,
+ "Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n",
+ api_max, api_ver);
+ goto try_again;
}
/*
@@ -1362,6 +1357,17 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
}
}
+ for (i = 0; i < ARRAY_SIZE(drv->fw.dbg_mem_tlv); i++) {
+ if (pieces->dbg_mem_tlv[i]) {
+ drv->fw.dbg_mem_tlv[i] =
+ kmemdup(pieces->dbg_mem_tlv[i],
+ sizeof(*drv->fw.dbg_mem_tlv[i]),
+ GFP_KERNEL);
+ if (!drv->fw.dbg_mem_tlv[i])
+ goto out_free_fw;
+ }
+ }
+
/* Now that we can no longer fail, copy information */
/*
@@ -1554,9 +1560,7 @@ struct iwl_mod_params iwlwifi_mod_params = {
.power_level = IWL_POWER_INDEX_1,
.d0i3_disable = true,
.d0i3_entry_delay = 1000,
-#ifndef CONFIG_IWLWIFI_UAPSD
- .uapsd_disable = true,
-#endif /* CONFIG_IWLWIFI_UAPSD */
+ .uapsd_disable = IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT,
/* the rest are 0 by default */
};
IWL_EXPORT_SYMBOL(iwlwifi_mod_params);
@@ -1675,12 +1679,9 @@ module_param_named(lar_disable, iwlwifi_mod_params.lar_disable,
MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)");
module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable,
- bool, S_IRUGO | S_IWUSR);
-#ifdef CONFIG_IWLWIFI_UAPSD
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: N)");
-#else
-MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality (default: Y)");
-#endif
+ uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(uapsd_disable,
+ "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)");
/*
* set bt_coex_active to true, uCode will do kill/defer
@@ -1726,4 +1727,4 @@ MODULE_PARM_DESC(d0i3_timeout, "Timeout to D0i3 entry when idle (ms)");
module_param_named(disable_11ac, iwlwifi_mod_params.disable_11ac, bool,
S_IRUGO);
-MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities");
+MODULE_PARM_DESC(disable_11ac, "Disable VHT capabilities (default: false)");
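uapsd_disable is now a two-bit bitmap rather than a bool, with both bits set by default (3). A sketch of how a consumer reads it, using the enum added in iwl-modparams.h below:

#include <stdbool.h>
#include <stdint.h>

#define IWL_DISABLE_UAPSD_BSS		(1u << 0)
#define IWL_DISABLE_UAPSD_P2P_CLIENT	(1u << 1)

/* e.g. module option uapsd_disable=2 re-enables U-APSD on the BSS only */
static bool bss_uapsd_allowed(uint32_t uapsd_disable)
{
	return !(uapsd_disable & IWL_DISABLE_UAPSD_BSS);
}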
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
index c15f5be85..bf1b69aec 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
@@ -390,10 +390,10 @@ iwl_eeprom_enh_txp_read_element(struct iwl_nvm_data *data,
int n_channels, s8 max_txpower_avg)
{
int ch_idx;
- enum ieee80211_band band;
+ enum nl80211_band band;
band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
- IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
for (ch_idx = 0; ch_idx < n_channels; ch_idx++) {
struct ieee80211_channel *chan = &data->channels[ch_idx];
@@ -526,7 +526,7 @@ static void iwl_init_band_reference(const struct iwl_cfg *cfg,
static void iwl_mod_ht40_chan_info(struct device *dev,
struct iwl_nvm_data *data, int n_channels,
- enum ieee80211_band band, u16 channel,
+ enum nl80211_band band, u16 channel,
const struct iwl_eeprom_channel *eeprom_ch,
u8 clear_ht40_extension_channel)
{
@@ -548,7 +548,7 @@ static void iwl_mod_ht40_chan_info(struct device *dev,
IWL_DEBUG_EEPROM(dev,
"HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
channel,
- band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+ band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
CHECK_AND_PRINT(IBSS),
CHECK_AND_PRINT(ACTIVE),
CHECK_AND_PRINT(RADAR),
@@ -606,8 +606,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
n_channels++;
channel->hw_value = eeprom_ch_array[ch_idx];
- channel->band = (band == 1) ? IEEE80211_BAND_2GHZ
- : IEEE80211_BAND_5GHZ;
+ channel->band = (band == 1) ? NL80211_BAND_2GHZ
+ : NL80211_BAND_5GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@@ -677,15 +677,15 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
for (band = 6; band <= 7; band++) {
- enum ieee80211_band ieeeband;
+ enum nl80211_band ieeeband;
iwl_init_band_reference(cfg, eeprom, eeprom_size, band,
&eeprom_ch_count, &eeprom_ch_info,
&eeprom_ch_array);
/* EEPROM band 6 is 2.4, band 7 is 5 GHz */
- ieeeband = (band == 6) ? IEEE80211_BAND_2GHZ
- : IEEE80211_BAND_5GHZ;
+ ieeeband = (band == 6) ? NL80211_BAND_2GHZ
+ : NL80211_BAND_5GHZ;
/* Loop through each band adding each of the channels */
for (ch_idx = 0; ch_idx < eeprom_ch_count; ch_idx++) {
@@ -708,7 +708,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
- int n_channels, enum ieee80211_band band)
+ int n_channels, enum nl80211_band band)
{
struct ieee80211_channel *chan = &data->channels[0];
int n = 0, idx = 0;
@@ -734,7 +734,7 @@ int iwl_init_sband_channels(struct iwl_nvm_data *data,
void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band,
+ enum nl80211_band band,
u8 tx_chains, u8 rx_chains)
{
int max_bit_rate = 0;
@@ -813,22 +813,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
int n_used = 0;
struct ieee80211_supported_band *sband;
- sband = &data->bands[IEEE80211_BAND_2GHZ];
- sband->band = IEEE80211_BAND_2GHZ;
+ sband = &data->bands[NL80211_BAND_2GHZ];
+ sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
sband->n_bitrates = N_RATES_24;
n_used += iwl_init_sband_channels(data, sband, n_channels,
- IEEE80211_BAND_2GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
data->valid_tx_ant, data->valid_rx_ant);
- sband = &data->bands[IEEE80211_BAND_5GHZ];
- sband->band = IEEE80211_BAND_5GHZ;
+ sband = &data->bands[NL80211_BAND_5GHZ];
+ sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
sband->n_bitrates = N_RATES_52;
n_used += iwl_init_sband_channels(data, sband, n_channels,
- IEEE80211_BAND_5GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
data->valid_tx_ant, data->valid_rx_ant);
if (n_channels != n_used)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
index ad2b83466..1f4e50289 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
@@ -98,7 +98,8 @@ struct iwl_nvm_data {
s8 max_tx_pwr_half_dbm;
bool lar_enabled;
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ bool vht160_supported;
+ struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_channel channels[];
};
@@ -133,12 +134,12 @@ int iwl_nvm_check_version(struct iwl_nvm_data *data,
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
- int n_channels, enum ieee80211_band band);
+ int n_channels, enum nl80211_band band);
void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band,
+ enum nl80211_band band,
u8 tx_chains, u8 rx_chains);
#endif /* __iwl_eeprom_parse_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 582008a66..270f39ecd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -321,6 +321,9 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
/* Write index table */
#define RFH_Q0_FRBDCB_WIDX 0xA08080
#define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
+/* Write index table - shadow registers */
+#define RFH_Q0_FRBDCB_WIDX_TRG 0x1C80
+#define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
/* Read index table */
#define RFH_Q0_FRBDCB_RIDX 0xA080C0
#define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)
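The new shadow trigger registers keep the one-32-bit-word-per-queue stride of the direct table, just at a different base. A quick standalone check of the arithmetic (userspace sketch, not driver code):

#include <assert.h>

#define RFH_Q0_FRBDCB_WIDX_TRG	0x1C80
#define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)

int main(void)
{
	assert(RFH_Q_FRBDCB_WIDX_TRG(0) == 0x1C80);
	assert(RFH_Q_FRBDCB_WIDX_TRG(3) == 0x1C8C);	/* queue 3 */
	return 0;
}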
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
index 8425e1a58..09b7ea28f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-error-dump.h
@@ -105,6 +105,7 @@ enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_RB = 11,
IWL_FW_ERROR_DUMP_PAGING = 12,
IWL_FW_ERROR_DUMP_RADIO_REG = 13,
+ IWL_FW_ERROR_DUMP_INTERNAL_TXF = 14,
IWL_FW_ERROR_DUMP_MAX,
};
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
index 15ec4e290..37dc09e8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw-file.h
@@ -142,6 +142,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_FW_DBG_CONF = 39,
IWL_UCODE_TLV_FW_DBG_TRIGGER = 40,
IWL_UCODE_TLV_FW_GSCAN_CAPA = 50,
+ IWL_UCODE_TLV_FW_MEM_SEG = 51,
};
struct iwl_ucode_tlv {
@@ -245,7 +246,6 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
/**
* enum iwl_ucode_tlv_api - ucode api
- * @IWL_UCODE_TLV_API_BT_COEX_SPLIT: new API for BT Coex
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
* @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
@@ -260,12 +260,11 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_api_t;
* @NUM_IWL_UCODE_TLV_API: number of bits used
*/
enum iwl_ucode_tlv_api {
- IWL_UCODE_TLV_API_BT_COEX_SPLIT = (__force iwl_ucode_tlv_api_t)3,
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = (__force iwl_ucode_tlv_api_t)8,
IWL_UCODE_TLV_API_WIFI_MCC_UPDATE = (__force iwl_ucode_tlv_api_t)9,
IWL_UCODE_TLV_API_WIDE_CMD_HDR = (__force iwl_ucode_tlv_api_t)14,
IWL_UCODE_TLV_API_LQ_SS_PARAMS = (__force iwl_ucode_tlv_api_t)18,
- IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
+ IWL_UCODE_TLV_API_NEW_VERSION = (__force iwl_ucode_tlv_api_t)20,
IWL_UCODE_TLV_API_EXT_SCAN_PRIORITY = (__force iwl_ucode_tlv_api_t)24,
IWL_UCODE_TLV_API_TX_POWER_CHAIN = (__force iwl_ucode_tlv_api_t)27,
@@ -324,6 +323,9 @@ typedef unsigned int __bitwise__ iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
* @IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
* regular image.
+ * @IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ * memory addresses from the firmware.
+ * @IWL_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -361,6 +363,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = (__force iwl_ucode_tlv_capa_t)75,
IWL_UCODE_TLV_CAPA_CTDP_SUPPORT = (__force iwl_ucode_tlv_capa_t)76,
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED = (__force iwl_ucode_tlv_capa_t)77,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = (__force iwl_ucode_tlv_capa_t)80,
+ IWL_UCODE_TLV_CAPA_LQM_SUPPORT = (__force iwl_ucode_tlv_capa_t)81,
NUM_IWL_UCODE_TLV_CAPA
#ifdef __CHECKER__
@@ -491,6 +495,37 @@ enum iwl_fw_dbg_monitor_mode {
};
/**
+ * enum iwl_fw_dbg_mem_seg_type - data types for dumping on error
+ *
+ * @FW_DBG_MEM_DCCM_LMAC: the data type is DCCM_LMAC
+ * @FW_DBG_MEM_DCCM_UMAC: the data type is DCCM_UMAC
+ * @FW_DBG_MEM_SMEM: the data type is SMEM
+ */
+enum iwl_fw_dbg_mem_seg_type {
+ FW_DBG_MEM_DCCM_LMAC = 0,
+ FW_DBG_MEM_DCCM_UMAC,
+ FW_DBG_MEM_SMEM,
+
+ /* Must be last */
+ FW_DBG_MEM_MAX,
+};
+
+/**
+ * struct iwl_fw_dbg_mem_seg_tlv - configures the debug data memory segments
+ *
+ * @data_type: enum %iwl_fw_dbg_mem_seg_type
+ * @ofs: the memory segment offset
+ * @len: the memory segment length, in bytes
+ *
+ * This parses IWL_UCODE_TLV_FW_MEM_SEG
+ */
+struct iwl_fw_dbg_mem_seg_tlv {
+ __le32 data_type;
+ __le32 ofs;
+ __le32 len;
+} __packed;
+
+/**
* struct iwl_fw_dbg_dest_tlv - configures the destination of the debug data
*
* @version: version of the TLV - currently 0
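Because iwl_fw_dbg_mem_seg_tlv carries little-endian fields, consumers convert on read and bound-check the type before use. A minimal sketch of accepting one segment, condensed from the iwl-drv.c hunk above (kernel-style helpers assumed):

static int check_mem_seg(const struct iwl_fw_dbg_mem_seg_tlv *seg,
			 size_t tlv_len)
{
	u32 type;

	if (tlv_len != sizeof(*seg))
		return -EINVAL;		/* malformed TLV */

	type = le32_to_cpu(seg->data_type);
	if (type >= FW_DBG_MEM_MAX)
		return -EINVAL;		/* unknown segment type */

	return 0;
}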
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
index 2942571c6..e461d6318 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fw.h
@@ -286,6 +286,8 @@ struct iwl_fw {
struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
size_t dbg_conf_tlv_len[FW_DBG_CONF_MAX];
struct iwl_fw_dbg_trigger_tlv *dbg_trigger_tlv[FW_DBG_TRIGGER_MAX];
+ struct iwl_fw_dbg_mem_seg_tlv *dbg_mem_tlv[FW_DBG_MEM_MAX];
+ bool dbg_dynamic_mem;
size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
u8 dbg_dest_reg_num;
struct iwl_gscan_capabilities gscan_capa;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index d1a5dd160..6c5c2f9f7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -92,6 +92,11 @@ enum iwl_amsdu_size {
IWL_AMSDU_12K = 2,
};
+enum iwl_uapsd_disable {
+ IWL_DISABLE_UAPSD_BSS = BIT(0),
+ IWL_DISABLE_UAPSD_P2P_CLIENT = BIT(1),
+};
+
/**
* struct iwl_mod_params
*
@@ -109,7 +114,8 @@ enum iwl_amsdu_size {
* @debug_level: levels are IWL_DL_*
* @ant_coupling: antenna coupling in dB, default = 0
* @nvm_file: specifies a external NVM file
- * @uapsd_disable: disable U-APSD, default = 1
+ * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default =
+ * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT
* @d0i3_disable: disable d0i3, default = 1,
* @d0i3_entry_delay: time to wait after no refs are taken before
* entering D0i3 (in msecs)
@@ -131,7 +137,7 @@ struct iwl_mod_params {
#endif
int ant_coupling;
char *nvm_file;
- bool uapsd_disable;
+ u32 uapsd_disable;
bool d0i3_disable;
unsigned int d0i3_entry_delay;
bool lar_disable;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 93a689583..21653fee8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -288,6 +288,9 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
!data->sku_cap_band_52GHz_enable)
continue;
+ if (ch_flags & NVM_CHANNEL_160MHZ)
+ data->vht160_supported = true;
+
if (!lar_supported && !(ch_flags & NVM_CHANNEL_VALID)) {
/*
* Channels might become valid later if lar is
@@ -308,7 +311,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->hw_value = nvm_chan[ch_idx];
channel->band = (ch_idx < num_2ghz_channels) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
channel->center_freq =
ieee80211_channel_to_frequency(
channel->hw_value, channel->band);
@@ -320,7 +323,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
* is not used in mvm, and is used for backwards compatibility
*/
channel->max_power = IWL_DEFAULT_MAX_TX_POWER;
- is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
+ is_5ghz = channel->band == NL80211_BAND_5GHZ;
/* don't put limitations in case we're using LAR */
if (!lar_supported)
@@ -331,17 +334,20 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
channel->flags = 0;
IWL_DEBUG_EEPROM(dev,
- "Ch. %d [%sGHz] %s%s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
+ "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n",
channel->hw_value,
is_5ghz ? "5.2" : "2.4",
+ ch_flags,
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(IBSS),
CHECK_AND_PRINT_I(ACTIVE),
CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(WIDE),
CHECK_AND_PRINT_I(INDOOR_ONLY),
CHECK_AND_PRINT_I(GO_CONCURRENT),
- ch_flags,
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(40MHZ),
+ CHECK_AND_PRINT_I(80MHZ),
+ CHECK_AND_PRINT_I(160MHZ),
channel->max_power,
((ch_flags & NVM_CHANNEL_IBSS) &&
!(ch_flags & NVM_CHANNEL_RADAR))
@@ -370,6 +376,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
max_ampdu_exponent <<
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
+ if (data->vht160_supported)
+ vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
+ IEEE80211_VHT_CAP_SHORT_GI_160;
+
if (cfg->vht_mu_mimo_supported)
vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE;
@@ -439,22 +449,22 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
&ch_section[NVM_CHANNELS_FAMILY_8000],
lar_supported);
- sband = &data->bands[IEEE80211_BAND_2GHZ];
- sband->band = IEEE80211_BAND_2GHZ;
+ sband = &data->bands[NL80211_BAND_2GHZ];
+ sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_24_OFFS];
sband->n_bitrates = N_RATES_24;
n_used += iwl_init_sband_channels(data, sband, n_channels,
- IEEE80211_BAND_2GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
tx_chains, rx_chains);
- sband = &data->bands[IEEE80211_BAND_5GHZ];
- sband->band = IEEE80211_BAND_5GHZ;
+ sband = &data->bands[NL80211_BAND_5GHZ];
+ sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
sband->n_bitrates = N_RATES_52;
n_used += iwl_init_sband_channels(data, sband, n_channels,
- IEEE80211_BAND_5GHZ);
- iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ);
+ iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_5GHZ,
tx_chains, rx_chains);
if (data->sku_cap_11ac_enable && !iwlwifi_mod_params.disable_11ac)
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
@@ -781,7 +791,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *regd;
int size_of_regd;
struct ieee80211_reg_rule *rule;
- enum ieee80211_band band;
+ enum nl80211_band band;
int center_freq, prev_center_freq = 0;
int valid_rules = 0;
bool new_rule;
@@ -809,7 +819,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
band = (ch_idx < NUM_2GHZ_CHANNELS) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
band);
new_rule = false;
@@ -857,7 +867,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
IWL_DEBUG_DEV(dev, IWL_DL_LAR,
"Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s(0x%02x): Ad-Hoc %ssupported\n",
center_freq,
- band == IEEE80211_BAND_5GHZ ? "5.2" : "2.4",
+ band == NL80211_BAND_5GHZ ? "5.2" : "2.4",
CHECK_AND_PRINT_I(VALID),
CHECK_AND_PRINT_I(ACTIVE),
CHECK_AND_PRINT_I(RADAR),
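The 160 MHz path is two short hops: any NVM channel flagged NVM_CHANNEL_160MHZ latches data->vht160_supported, which later ORs the two VHT capability bits in. Condensed from the two hunks above, for reading in one place:

/* during channel-map parsing */
if (ch_flags & NVM_CHANNEL_160MHZ)
	data->vht160_supported = true;

/* later, when building VHT capabilities */
if (data->vht160_supported)
	vht_cap->cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ |
			IEEE80211_VHT_CAP_SHORT_GI_160;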
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
index 4a4dea087..7beba9ae5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.c
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -72,8 +73,6 @@
#include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-#define IWL_NUM_PAPD_CH_GROUPS 9
-#define IWL_NUM_TXP_CH_GROUPS 9
struct iwl_phy_db_entry {
u16 size;
@@ -86,14 +85,18 @@ struct iwl_phy_db_entry {
* @cfg: phy configuration.
* @calib_nch: non channel specific calibration data.
* @calib_ch: channel specific calibration data.
+ * @n_group_papd: number of entries in papd channel group.
* @calib_ch_group_papd: calibration data related to papd channel group.
+ * @n_group_txp: number of entries in tx power channel group.
 * @calib_ch_group_txp: calibration data related to tx power channel group.
*/
struct iwl_phy_db {
struct iwl_phy_db_entry cfg;
struct iwl_phy_db_entry calib_nch;
- struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
- struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
+ int n_group_papd;
+ struct iwl_phy_db_entry *calib_ch_group_papd;
+ int n_group_txp;
+ struct iwl_phy_db_entry *calib_ch_group_txp;
struct iwl_trans *trans;
};
@@ -143,6 +146,9 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans)
phy_db->trans = trans;
+ phy_db->n_group_txp = -1;
+ phy_db->n_group_papd = -1;
+
/* TODO: add default values of the phy db. */
return phy_db;
}
@@ -166,11 +172,11 @@ iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
case IWL_PHY_DB_CALIB_NCH:
return &phy_db->calib_nch;
case IWL_PHY_DB_CALIB_CHG_PAPD:
- if (chg_id >= IWL_NUM_PAPD_CH_GROUPS)
+ if (chg_id >= phy_db->n_group_papd)
return NULL;
return &phy_db->calib_ch_group_papd[chg_id];
case IWL_PHY_DB_CALIB_CHG_TXP:
- if (chg_id >= IWL_NUM_TXP_CH_GROUPS)
+ if (chg_id >= phy_db->n_group_txp)
return NULL;
return &phy_db->calib_ch_group_txp[chg_id];
default:
@@ -202,17 +208,21 @@ void iwl_phy_db_free(struct iwl_phy_db *phy_db)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
- for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
+
+ for (i = 0; i < phy_db->n_group_papd; i++)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
+ kfree(phy_db->calib_ch_group_papd);
+
+ for (i = 0; i < phy_db->n_group_txp; i++)
iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
+ kfree(phy_db->calib_ch_group_txp);
kfree(phy_db);
}
IWL_EXPORT_SYMBOL(iwl_phy_db_free);
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
- gfp_t alloc_ctx)
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
+ struct iwl_rx_packet *pkt)
{
struct iwl_calib_res_notif_phy_db *phy_db_notif =
(struct iwl_calib_res_notif_phy_db *)pkt->data;
@@ -224,16 +234,42 @@ int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
if (!phy_db)
return -EINVAL;
- if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
- type == IWL_PHY_DB_CALIB_CHG_TXP)
+ if (type == IWL_PHY_DB_CALIB_CHG_PAPD) {
chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
+ if (phy_db && !phy_db->calib_ch_group_papd) {
+ /*
+ * Firmware sends the largest index first, so we can use
+ * it to know how much we should allocate.
+ */
+ phy_db->calib_ch_group_papd = kcalloc(chg_id + 1,
+ sizeof(struct iwl_phy_db_entry),
+ GFP_ATOMIC);
+ if (!phy_db->calib_ch_group_papd)
+ return -ENOMEM;
+ phy_db->n_group_papd = chg_id + 1;
+ }
+ } else if (type == IWL_PHY_DB_CALIB_CHG_TXP) {
+ chg_id = le16_to_cpup((__le16 *)phy_db_notif->data);
+ if (phy_db && !phy_db->calib_ch_group_txp) {
+ /*
+ * Firmware sends the largest index first, so we can use
+ * it to know how much we should allocate.
+ */
+ phy_db->calib_ch_group_txp = kcalloc(chg_id + 1,
+ sizeof(struct iwl_phy_db_entry),
+ GFP_ATOMIC);
+ if (!phy_db->calib_ch_group_txp)
+ return -ENOMEM;
+ phy_db->n_group_txp = chg_id + 1;
+ }
+ }
entry = iwl_phy_db_get_section(phy_db, type, chg_id);
if (!entry)
return -EINVAL;
kfree(entry->data);
- entry->data = kmemdup(phy_db_notif->data, size, alloc_ctx);
+ entry->data = kmemdup(phy_db_notif->data, size, GFP_ATOMIC);
if (!entry->data) {
entry->size = 0;
return -ENOMEM;
@@ -296,7 +332,7 @@ static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
if (ch_index == 0xff)
return 0xff;
- for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
+ for (i = 0; i < phy_db->n_group_txp; i++) {
txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
if (!txp_chg)
return 0xff;
@@ -447,7 +483,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
/* Send all the TXP channel specific data */
err = iwl_phy_db_send_all_channel_groups(phy_db,
IWL_PHY_DB_CALIB_CHG_PAPD,
- IWL_NUM_PAPD_CH_GROUPS);
+ phy_db->n_group_papd);
if (err) {
IWL_ERR(phy_db->trans,
"Cannot send channel specific PAPD groups\n");
@@ -457,7 +493,7 @@ int iwl_send_phy_db_data(struct iwl_phy_db *phy_db)
/* Send all the TXP channel specific data */
err = iwl_phy_db_send_all_channel_groups(phy_db,
IWL_PHY_DB_CALIB_CHG_TXP,
- IWL_NUM_TXP_CH_GROUPS);
+ phy_db->n_group_txp);
if (err) {
IWL_ERR(phy_db->trans,
"Cannot send channel specific TX power groups\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
index 24103877e..d34de3f71 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-phy-db.h
@@ -73,8 +73,8 @@ struct iwl_phy_db *iwl_phy_db_init(struct iwl_trans *trans);
void iwl_phy_db_free(struct iwl_phy_db *phy_db);
-int iwl_phy_db_set_section(struct iwl_phy_db *phy_db, struct iwl_rx_packet *pkt,
- gfp_t alloc_ctx);
+int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
+ struct iwl_rx_packet *pkt);
int iwl_send_phy_db_data(struct iwl_phy_db *phy_db);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index c46e596e1..6c1d20ded 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -345,6 +347,16 @@ enum secure_load_status_reg {
#define TXF_READ_MODIFY_DATA (0xa00448)
#define TXF_READ_MODIFY_ADDR (0xa0044c)
+/* UMAC Internal Tx Fifo */
+#define TXF_CPU2_FIFO_ITEM_CNT (0xA00538)
+#define TXF_CPU2_WR_PTR (0xA00514)
+#define TXF_CPU2_RD_PTR (0xA00510)
+#define TXF_CPU2_FENCE_PTR (0xA00518)
+#define TXF_CPU2_LOCK_FENCE (0xA00524)
+#define TXF_CPU2_NUM (0xA0053C)
+#define TXF_CPU2_READ_MODIFY_DATA (0xA00548)
+#define TXF_CPU2_READ_MODIFY_ADDR (0xA0054C)
+
/* Radio registers access */
#define RSP_RADIO_CMD (0xa02804)
#define RSP_RADIO_RDDAT (0xa02814)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 91d74b3f6..8193d36ae 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -7,6 +7,7 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -519,7 +521,7 @@ struct iwl_trans;
struct iwl_trans_txq_scd_cfg {
u8 fifo;
- s8 sta_id;
+ u8 sta_id;
u8 tid;
bool aggregate;
int frame_limit;
@@ -751,6 +753,7 @@ enum iwl_plat_pm_mode {
* @dev - pointer to struct device * that represents the device
* @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
* 0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
+ * @hw_rf_id: a u32 with the device RF ID
* @hw_id: a u32 with the ID of the device / sub-device.
* Set during transport allocation.
* @hw_id_str: a string with info about HW ID. Set during transport allocation.
@@ -795,6 +798,7 @@ struct iwl_trans {
struct device *dev;
u32 max_skb_frags;
u32 hw_rev;
+ u32 hw_rf_id;
u32 hw_id;
char hw_id_str[52];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 23e7e2937..2e06dfc1c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o coex.o coex_legacy.o
+iwlmvm-y += power.o coex.o
iwlmvm-y += tt.o offloading.o tdls.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 2e098f8e0..a63f5bbb1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -378,7 +378,7 @@ iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
chanctx_conf = rcu_dereference(vif->chanctx_conf);
if (!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ) {
rcu_read_unlock();
return BT_COEX_INVALID_LUT;
}
@@ -411,9 +411,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
struct iwl_bt_coex_cmd bt_cmd = {};
u32 mode;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_send_bt_init_conf_old(mvm);
-
lockdep_assert_held(&mvm->mutex);
if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
@@ -540,7 +537,7 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
/* If channel context is invalid or not on 2.4GHz .. */
if ((!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
+ chanctx_conf->def.chan->band != NL80211_BAND_2GHZ)) {
if (vif->type == NL80211_IFTYPE_STATION) {
/* ... relax constraints and disable rssi events */
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
@@ -728,12 +725,6 @@ void iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_bt_coex_profile_notif *notif = (void *)pkt->data;
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- iwl_mvm_rx_bt_coex_notif_old(mvm, rxb);
- return;
- }
-
IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
@@ -755,12 +746,6 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- iwl_mvm_bt_rssi_event_old(mvm, vif, rssi_event);
- return;
- }
-
lockdep_assert_held(&mvm->mutex);
/* Ignore updates if we are in force mode */
@@ -807,9 +792,6 @@ u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_coex_agg_time_limit_old(mvm, sta);
-
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
return LINK_QUAL_AGG_TIME_LIMIT_DEF;
@@ -834,9 +816,6 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct iwl_mvm_phy_ctxt *phy_ctxt = mvmvif->phy_ctxt;
enum iwl_bt_coex_lut_type lut_type;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_bt_coex_is_mimo_allowed_old(mvm, sta);
-
if (IWL_COEX_IS_TTC_ON(mvm->last_bt_notif.ttc_rrc_status, phy_ctxt->id))
return true;
@@ -864,9 +843,6 @@ bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant)
if (ant & mvm->cfg->non_shared_ant)
return true;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) <
BT_HIGH_TRAFFIC;
}
@@ -877,21 +853,15 @@ bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm)
if (mvm->cfg->bt_shared_single_ant)
return true;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_bt_coex_is_shared_ant_avail_old(mvm);
-
return le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) < BT_HIGH_TRAFFIC;
}
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
u32 bt_activity = le32_to_cpu(mvm->last_bt_notif.bt_activity_grading);
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_BT_COEX_SPLIT))
- return iwl_mvm_bt_coex_is_tpc_allowed_old(mvm, band);
-
- if (band != IEEE80211_BAND_2GHZ)
+ if (band != NL80211_BAND_2GHZ)
return false;
return bt_activity >= BT_LOW_TRAFFIC;
@@ -903,7 +873,7 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
__le16 fc = hdr->frame_control;
bool mplut_enabled = iwl_mvm_is_mplut_supported(mvm);
- if (info->band != IEEE80211_BAND_2GHZ)
+ if (info->band != NL80211_BAND_2GHZ)
return 0;
if (unlikely(mvm->bt_tx_prio))
@@ -937,12 +907,6 @@ u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- iwl_mvm_bt_coex_vif_change_old(mvm);
- return;
- }
-
iwl_mvm_bt_coex_notif_handle(mvm);
}
@@ -955,12 +919,6 @@ void iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- iwl_mvm_rx_ant_coupling_notif_old(mvm, rxb);
- return;
- }
-
if (!iwl_mvm_bt_is_plcr_supported(mvm))
return;
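
The other, purely mechanical change in this file is the band enum rename: in 4.7 mac80211's enum ieee80211_band was retired in favour of the nl80211 names, and the values are identical, so the substitution is textual. A minimal sketch of the correspondence (illustrative, not the actual kernel headers):

    enum nl80211_band {
        NL80211_BAND_2GHZ,
        NL80211_BAND_5GHZ,
    };

    /* the old spellings mapped 1:1 onto the new ones */
    #define IEEE80211_BAND_2GHZ NL80211_BAND_2GHZ
    #define IEEE80211_BAND_5GHZ NL80211_BAND_5GHZ
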
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
deleted file mode 100644
index 015045733..000000000
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex_legacy.c
+++ /dev/null
@@ -1,1315 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2013 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/ieee80211.h>
-#include <linux/etherdevice.h>
-#include <net/mac80211.h>
-
-#include "fw-api-coex.h"
-#include "iwl-modparams.h"
-#include "mvm.h"
-#include "iwl-debug.h"
-
-#define EVENT_PRIO_ANT(_evt, _prio, _shrd_ant) \
- [(_evt)] = (((_prio) << BT_COEX_PRIO_TBL_PRIO_POS) | \
- ((_shrd_ant) << BT_COEX_PRIO_TBL_SHRD_ANT_POS))
-
-static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB1,
- BT_COEX_PRIO_TBL_PRIO_BYPASS, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_INIT_CALIB2,
- BT_COEX_PRIO_TBL_PRIO_BYPASS, 1),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW1,
- BT_COEX_PRIO_TBL_PRIO_LOW, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_LOW2,
- BT_COEX_PRIO_TBL_PRIO_LOW, 1),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH1,
- BT_COEX_PRIO_TBL_PRIO_HIGH, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_PERIODIC_CALIB_HIGH2,
- BT_COEX_PRIO_TBL_PRIO_HIGH, 1),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_DTIM,
- BT_COEX_PRIO_TBL_DISABLED, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN52,
- BT_COEX_PRIO_TBL_PRIO_COEX_OFF, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_SCAN24,
- BT_COEX_PRIO_TBL_PRIO_COEX_ON, 0),
- EVENT_PRIO_ANT(BT_COEX_PRIO_TBL_EVT_IDLE,
- BT_COEX_PRIO_TBL_PRIO_COEX_IDLE, 0),
- 0, 0, 0, 0, 0, 0,
-};
-
-#undef EVENT_PRIO_ANT
-
-static int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm)
-{
- if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return 0;
-
- return iwl_mvm_send_cmd_pdu(mvm, BT_COEX_PRIO_TABLE, 0,
- sizeof(struct iwl_bt_coex_prio_tbl_cmd),
- &iwl_bt_prio_tbl);
-}
-
-static const __le32 iwl_bt_prio_boost[BT_COEX_BOOST_SIZE] = {
- cpu_to_le32(0xf0f0f0f0), /* 50% */
- cpu_to_le32(0xc0c0c0c0), /* 25% */
- cpu_to_le32(0xfcfcfcfc), /* 75% */
- cpu_to_le32(0xfefefefe), /* 87.5% */
-};
-
-static const __le32 iwl_single_shared_ant[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
- {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- },
- {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- },
- {
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x40000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x44000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- },
-};
-
-static const __le32 iwl_combined_lookup[BT_COEX_MAX_LUT][BT_COEX_LUT_SIZE] = {
- {
- /* Tight */
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaeaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0x00004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
- },
- {
- /* Loose */
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
- },
- {
- /* Tx Tx disabled */
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xeeaaaaaa),
- cpu_to_le32(0xaaaaaaaa),
- cpu_to_le32(0xcc00ff28),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xcc00aaaa),
- cpu_to_le32(0x0000aaaa),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xc0004000),
- cpu_to_le32(0xf0005000),
- cpu_to_le32(0xf0005000),
- },
-};
-
-/* 20MHz / 40MHz below / 40MHz above */
-static const __le64 iwl_ci_mask[][3] = {
- /* dummy entry for channel 0 */
- {cpu_to_le64(0), cpu_to_le64(0), cpu_to_le64(0)},
- {
- cpu_to_le64(0x0000001FFFULL),
- cpu_to_le64(0x0ULL),
- cpu_to_le64(0x00007FFFFFULL),
- },
- {
- cpu_to_le64(0x000000FFFFULL),
- cpu_to_le64(0x0ULL),
- cpu_to_le64(0x0003FFFFFFULL),
- },
- {
- cpu_to_le64(0x000003FFFCULL),
- cpu_to_le64(0x0ULL),
- cpu_to_le64(0x000FFFFFFCULL),
- },
- {
- cpu_to_le64(0x00001FFFE0ULL),
- cpu_to_le64(0x0ULL),
- cpu_to_le64(0x007FFFFFE0ULL),
- },
- {
- cpu_to_le64(0x00007FFF80ULL),
- cpu_to_le64(0x00007FFFFFULL),
- cpu_to_le64(0x01FFFFFF80ULL),
- },
- {
- cpu_to_le64(0x0003FFFC00ULL),
- cpu_to_le64(0x0003FFFFFFULL),
- cpu_to_le64(0x0FFFFFFC00ULL),
- },
- {
- cpu_to_le64(0x000FFFF000ULL),
- cpu_to_le64(0x000FFFFFFCULL),
- cpu_to_le64(0x3FFFFFF000ULL),
- },
- {
- cpu_to_le64(0x007FFF8000ULL),
- cpu_to_le64(0x007FFFFFE0ULL),
- cpu_to_le64(0xFFFFFF8000ULL),
- },
- {
- cpu_to_le64(0x01FFFE0000ULL),
- cpu_to_le64(0x01FFFFFF80ULL),
- cpu_to_le64(0xFFFFFE0000ULL),
- },
- {
- cpu_to_le64(0x0FFFF00000ULL),
- cpu_to_le64(0x0FFFFFFC00ULL),
- cpu_to_le64(0x0ULL),
- },
- {
- cpu_to_le64(0x3FFFC00000ULL),
- cpu_to_le64(0x3FFFFFF000ULL),
- cpu_to_le64(0x0)
- },
- {
- cpu_to_le64(0xFFFE000000ULL),
- cpu_to_le64(0xFFFFFF8000ULL),
- cpu_to_le64(0x0)
- },
- {
- cpu_to_le64(0xFFF8000000ULL),
- cpu_to_le64(0xFFFFFE0000ULL),
- cpu_to_le64(0x0)
- },
- {
- cpu_to_le64(0xFFC0000000ULL),
- cpu_to_le64(0x0ULL),
- cpu_to_le64(0x0ULL)
- },
-};
-
-enum iwl_bt_kill_msk {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_MAX,
-};
-
-static const u32 iwl_bt_ctl_kill_msk[BT_KILL_MSK_MAX] = {
- [BT_KILL_MSK_DEFAULT] = 0xfffffc00,
- [BT_KILL_MSK_NEVER] = 0xffffffff,
- [BT_KILL_MSK_ALWAYS] = 0,
-};
-
-static const u8 iwl_bt_cts_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- },
- {
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_NEVER,
- },
- {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_NEVER,
- BT_KILL_MSK_DEFAULT,
- },
-};
-
-static const u8 iwl_bt_ack_kill_msk[BT_MAX_AG][BT_COEX_MAX_LUT] = {
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_ALWAYS,
- },
- {
- BT_KILL_MSK_DEFAULT,
- BT_KILL_MSK_ALWAYS,
- BT_KILL_MSK_DEFAULT,
- },
-};
-
-struct corunning_block_luts {
- u8 range;
- __le32 lut20[BT_COEX_CORUN_LUT_SIZE];
-};
-
-/*
- * Ranges for the antenna coupling calibration / co-running block LUT:
- * LUT0: [ 0, 12[
- * LUT1: [12, 20[
- * LUT2: [20, 21[
- * LUT3: [21, 23[
- * LUT4: [23, 27[
- * LUT5: [27, 30[
- * LUT6: [30, 32[
- * LUT7: [32, 33[
- * LUT8: [33, - [
- */
-static const struct corunning_block_luts antenna_coupling_ranges[] = {
- {
- .range = 0,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 12,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 20,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 21,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 23,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 27,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 30,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 32,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
- {
- .range = 33,
- .lut20 = {
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
- },
- },
-};
-
-static enum iwl_bt_coex_lut_type
-iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
-{
- struct ieee80211_chanctx_conf *chanctx_conf;
- enum iwl_bt_coex_lut_type ret;
- u16 phy_ctx_id;
-
- /*
- * Checking that we hold mvm->mutex is a good idea, but the rate
- * control can't acquire the mutex since it runs in Tx path.
- * So this is racy in that case, but in the worst case, the AMPDU
- * size limit will be wrong for a short time which is not a big
- * issue.
- */
-
- rcu_read_lock();
-
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
- if (!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
- rcu_read_unlock();
- return BT_COEX_INVALID_LUT;
- }
-
- ret = BT_COEX_TX_DIS_LUT;
-
- if (mvm->cfg->bt_shared_single_ant) {
- rcu_read_unlock();
- return ret;
- }
-
- phy_ctx_id = *((u16 *)chanctx_conf->drv_priv);
-
- if (mvm->last_bt_ci_cmd_old.primary_ch_phy_id == phy_ctx_id)
- ret = le32_to_cpu(mvm->last_bt_notif_old.primary_ch_lut);
- else if (mvm->last_bt_ci_cmd_old.secondary_ch_phy_id == phy_ctx_id)
- ret = le32_to_cpu(mvm->last_bt_notif_old.secondary_ch_lut);
- /* else - default = TX TX disallowed */
-
- rcu_read_unlock();
-
- return ret;
-}
-
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm)
-{
- struct iwl_bt_coex_cmd_old *bt_cmd;
- struct iwl_host_cmd cmd = {
- .id = BT_CONFIG,
- .len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- int ret;
- u32 flags;
-
- ret = iwl_send_bt_prio_tbl(mvm);
- if (ret)
- return ret;
-
- bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
- if (!bt_cmd)
- return -ENOMEM;
- cmd.data[0] = bt_cmd;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS)) {
- switch (mvm->bt_force_ant_mode) {
- case BT_FORCE_ANT_AUTO:
- flags = BT_COEX_AUTO_OLD;
- break;
- case BT_FORCE_ANT_BT:
- flags = BT_COEX_BT_OLD;
- break;
- case BT_FORCE_ANT_WIFI:
- flags = BT_COEX_WIFI_OLD;
- break;
- default:
- WARN_ON(1);
- flags = 0;
- }
-
- bt_cmd->flags = cpu_to_le32(flags);
- bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE);
- goto send_cmd;
- }
-
- bt_cmd->max_kill = 5;
- bt_cmd->bt4_antenna_isolation_thr =
- IWL_MVM_BT_COEX_ANTENNA_COUPLING_THRS;
- bt_cmd->bt4_antenna_isolation = iwlwifi_mod_params.ant_coupling;
- bt_cmd->bt4_tx_tx_delta_freq_thr = 15;
- bt_cmd->bt4_tx_rx_max_freq0 = 15;
- bt_cmd->override_primary_lut = BT_COEX_INVALID_LUT;
- bt_cmd->override_secondary_lut = BT_COEX_INVALID_LUT;
-
- flags = iwlwifi_mod_params.bt_coex_active ?
- BT_COEX_NW_OLD : BT_COEX_DISABLE_OLD;
- bt_cmd->flags = cpu_to_le32(flags);
-
- bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
- BT_VALID_BT_PRIO_BOOST |
- BT_VALID_MAX_KILL |
- BT_VALID_3W_TMRS |
- BT_VALID_KILL_ACK |
- BT_VALID_KILL_CTS |
- BT_VALID_REDUCED_TX_POWER |
- BT_VALID_LUT |
- BT_VALID_WIFI_RX_SW_PRIO_BOOST |
- BT_VALID_WIFI_TX_SW_PRIO_BOOST |
- BT_VALID_ANT_ISOLATION |
- BT_VALID_ANT_ISOLATION_THRS |
- BT_VALID_TXTX_DELTA_FREQ_THRS |
- BT_VALID_TXRX_MAX_FREQ_0 |
- BT_VALID_SYNC_TO_SCO |
- BT_VALID_TTC |
- BT_VALID_RRC);
-
- if (IWL_MVM_BT_COEX_SYNC2SCO)
- bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
-
- if (iwl_mvm_bt_is_plcr_supported(mvm)) {
- bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
- BT_VALID_CORUN_LUT_40);
- bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
- }
-
- if (IWL_MVM_BT_COEX_MPLUT) {
- bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
- bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
- }
-
- if (IWL_MVM_BT_COEX_TTC)
- bt_cmd->flags |= cpu_to_le32(BT_COEX_TTC);
-
- if (iwl_mvm_bt_is_rrc_supported(mvm))
- bt_cmd->flags |= cpu_to_le32(BT_COEX_RRC);
-
- if (mvm->cfg->bt_shared_single_ant)
- memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
- sizeof(iwl_single_shared_ant));
- else
- memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
- sizeof(iwl_combined_lookup));
-
- /* Take first Co-running block LUT to get started */
- memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
- sizeof(bt_cmd->bt4_corun_lut20));
- memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
- sizeof(bt_cmd->bt4_corun_lut40));
-
- memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
- sizeof(iwl_bt_prio_boost));
- bt_cmd->bt4_multiprio_lut[0] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG0);
- bt_cmd->bt4_multiprio_lut[1] = cpu_to_le32(IWL_MVM_BT_COEX_MPLUT_REG1);
-
-send_cmd:
- memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
- memset(&mvm->last_bt_ci_cmd_old, 0, sizeof(mvm->last_bt_ci_cmd_old));
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
-
- kfree(bt_cmd);
- return ret;
-}
-
-static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm)
-{
- struct iwl_bt_coex_profile_notif_old *notif = &mvm->last_bt_notif_old;
- u32 primary_lut = le32_to_cpu(notif->primary_ch_lut);
- u32 ag = le32_to_cpu(notif->bt_activity_grading);
- struct iwl_bt_coex_cmd_old *bt_cmd;
- u8 ack_kill_msk, cts_kill_msk;
- struct iwl_host_cmd cmd = {
- .id = BT_CONFIG,
- .data[0] = &bt_cmd,
- .len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
- int ret = 0;
-
- lockdep_assert_held(&mvm->mutex);
-
- ack_kill_msk = iwl_bt_ack_kill_msk[ag][primary_lut];
- cts_kill_msk = iwl_bt_cts_kill_msk[ag][primary_lut];
-
- if (mvm->bt_ack_kill_msk[0] == ack_kill_msk &&
- mvm->bt_cts_kill_msk[0] == cts_kill_msk)
- return 0;
-
- mvm->bt_ack_kill_msk[0] = ack_kill_msk;
- mvm->bt_cts_kill_msk[0] = cts_kill_msk;
-
- bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
- if (!bt_cmd)
- return -ENOMEM;
- cmd.data[0] = bt_cmd;
- bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-
- bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[ack_kill_msk]);
- bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_ctl_kill_msk[cts_kill_msk]);
- bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
- BT_VALID_KILL_ACK |
- BT_VALID_KILL_CTS);
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
-
- kfree(bt_cmd);
- return ret;
-}
-
-static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
- bool enable)
-{
- struct iwl_bt_coex_cmd_old *bt_cmd;
- /* Send ASYNC since this can be sent from an atomic context */
- struct iwl_host_cmd cmd = {
- .id = BT_CONFIG,
- .len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_DUP, },
- .flags = CMD_ASYNC,
- };
- struct iwl_mvm_sta *mvmsta;
- int ret;
-
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
- if (!mvmsta)
- return 0;
-
- /* nothing to do */
- if (mvmsta->bt_reduced_txpower == enable)
- return 0;
-
- bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
- if (!bt_cmd)
- return -ENOMEM;
- cmd.data[0] = bt_cmd;
- bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
-
- bt_cmd->valid_bit_msk =
- cpu_to_le32(BT_VALID_ENABLE | BT_VALID_REDUCED_TX_POWER);
- bt_cmd->bt_reduced_tx_power = sta_id;
-
- if (enable)
- bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
-
- IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
- enable ? "en" : "dis", sta_id);
-
- mvmsta->bt_reduced_txpower = enable;
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
-
- kfree(bt_cmd);
- return ret;
-}
-
-struct iwl_bt_iterator_data {
- struct iwl_bt_coex_profile_notif_old *notif;
- struct iwl_mvm *mvm;
- struct ieee80211_chanctx_conf *primary;
- struct ieee80211_chanctx_conf *secondary;
- bool primary_ll;
-};
-
-static inline
-void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool enable, int rssi)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- mvmvif->bf_data.last_bt_coex_event = rssi;
- mvmvif->bf_data.bt_coex_max_thold =
- enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
- mvmvif->bf_data.bt_coex_min_thold =
- enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
-}
-
-/* must be called under rcu_read_lock */
-static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_bt_iterator_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
- struct ieee80211_chanctx_conf *chanctx_conf;
- enum ieee80211_smps_mode smps_mode;
- u32 bt_activity_grading;
- int ave_rssi;
-
- lockdep_assert_held(&mvm->mutex);
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- /* default smps_mode for BSS / P2P client is AUTOMATIC */
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
- break;
- case NL80211_IFTYPE_AP:
- if (!mvmvif->ap_ibss_active)
- return;
- break;
- default:
- return;
- }
-
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
-
- /* If channel context is invalid or not on 2.4GHz .. */
- if ((!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
- if (vif->type == NL80211_IFTYPE_STATION) {
- /* ... relax constraints and disable rssi events */
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
- iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
- false);
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
- }
- return;
- }
-
- bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
- if (bt_activity_grading >= BT_HIGH_TRAFFIC)
- smps_mode = IEEE80211_SMPS_STATIC;
- else if (bt_activity_grading >= BT_LOW_TRAFFIC)
- smps_mode = vif->type == NL80211_IFTYPE_AP ?
- IEEE80211_SMPS_OFF :
- IEEE80211_SMPS_DYNAMIC;
-
- /* relax SMPS constraints for next association */
- if (!vif->bss_conf.assoc)
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
- if (mvmvif->phy_ctxt &&
- data->notif->rrc_enabled & BIT(mvmvif->phy_ctxt->id))
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
-
- IWL_DEBUG_COEX(data->mvm,
- "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
- mvmvif->id, data->notif->bt_status, bt_activity_grading,
- smps_mode);
-
- if (vif->type == NL80211_IFTYPE_STATION)
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
-
- /* low latency is always primary */
- if (iwl_mvm_vif_low_latency(mvmvif)) {
- data->primary_ll = true;
-
- data->secondary = data->primary;
- data->primary = chanctx_conf;
- }
-
- if (vif->type == NL80211_IFTYPE_AP) {
- if (!mvmvif->ap_ibss_active)
- return;
-
- if (chanctx_conf == data->primary)
- return;
-
- if (!data->primary_ll) {
- /*
- * downgrade the current primary no matter what its
- * type is.
- */
- data->secondary = data->primary;
- data->primary = chanctx_conf;
- } else {
- /* there is low latency vif - we will be secondary */
- data->secondary = chanctx_conf;
- }
- return;
- }
-
- /*
- * STA / P2P Client, try to be primary if first vif. If we are in low
- * latency mode, we are already in primary and just don't do much
- */
- if (!data->primary || data->primary == chanctx_conf)
- data->primary = chanctx_conf;
- else if (!data->secondary)
- /* if secondary is not NULL, it might be a GO */
- data->secondary = chanctx_conf;
-
- /*
- * don't reduce the Tx power if one of these is true:
- * we are in LOOSE
- * single share antenna product
- * BT is active
- * we are associated
- */
- if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
- mvm->cfg->bt_shared_single_ant || !vif->bss_conf.assoc ||
- !data->notif->bt_status) {
- iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false);
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
- return;
- }
-
- /* try to get the avg rssi from fw */
- ave_rssi = mvmvif->bf_data.ave_beacon_signal;
-
- /* if the RSSI isn't valid, fake it is very low */
- if (!ave_rssi)
- ave_rssi = -100;
- if (ave_rssi > -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH) {
- if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true))
- IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
- } else if (ave_rssi < -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH) {
- if (iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, false))
- IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
- }
-
- /* Begin to monitor the RSSI: it may influence the reduced Tx power */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
-}
-
-static void iwl_mvm_bt_coex_notif_handle(struct iwl_mvm *mvm)
-{
- struct iwl_bt_iterator_data data = {
- .mvm = mvm,
- .notif = &mvm->last_bt_notif_old,
- };
- struct iwl_bt_coex_ci_cmd_old cmd = {};
- u8 ci_bw_idx;
-
- /* Ignore updates if we are in force mode */
- if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return;
-
- rcu_read_lock();
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bt_notif_iterator, &data);
-
- if (data.primary) {
- struct ieee80211_chanctx_conf *chan = data.primary;
-
- if (WARN_ON(!chan->def.chan)) {
- rcu_read_unlock();
- return;
- }
-
- if (chan->def.width < NL80211_CHAN_WIDTH_40) {
- ci_bw_idx = 0;
- cmd.co_run_bw_primary = 0;
- } else {
- cmd.co_run_bw_primary = 1;
- if (chan->def.center_freq1 >
- chan->def.chan->center_freq)
- ci_bw_idx = 2;
- else
- ci_bw_idx = 1;
- }
-
- cmd.bt_primary_ci =
- iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
- cmd.primary_ch_phy_id = *((u16 *)data.primary->drv_priv);
- }
-
- if (data.secondary) {
- struct ieee80211_chanctx_conf *chan = data.secondary;
-
- if (WARN_ON(!data.secondary->def.chan)) {
- rcu_read_unlock();
- return;
- }
-
- if (chan->def.width < NL80211_CHAN_WIDTH_40) {
- ci_bw_idx = 0;
- cmd.co_run_bw_secondary = 0;
- } else {
- cmd.co_run_bw_secondary = 1;
- if (chan->def.center_freq1 >
- chan->def.chan->center_freq)
- ci_bw_idx = 2;
- else
- ci_bw_idx = 1;
- }
-
- cmd.bt_secondary_ci =
- iwl_ci_mask[chan->def.chan->hw_value][ci_bw_idx];
- cmd.secondary_ch_phy_id = *((u16 *)data.secondary->drv_priv);
- }
-
- rcu_read_unlock();
-
- /* Don't spam the fw with the same command over and over */
- if (memcmp(&cmd, &mvm->last_bt_ci_cmd_old, sizeof(cmd))) {
- if (iwl_mvm_send_cmd_pdu(mvm, BT_COEX_CI, 0,
- sizeof(cmd), &cmd))
- IWL_ERR(mvm, "Failed to send BT_CI cmd\n");
- memcpy(&mvm->last_bt_ci_cmd_old, &cmd, sizeof(cmd));
- }
-
- if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
- IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_bt_coex_profile_notif_old *notif = (void *)pkt->data;
-
- IWL_DEBUG_COEX(mvm, "BT Coex Notification received\n");
- IWL_DEBUG_COEX(mvm, "\tBT status: %s\n",
- notif->bt_status ? "ON" : "OFF");
- IWL_DEBUG_COEX(mvm, "\tBT open conn %d\n", notif->bt_open_conn);
- IWL_DEBUG_COEX(mvm, "\tBT ci compliance %d\n", notif->bt_ci_compliance);
- IWL_DEBUG_COEX(mvm, "\tBT primary_ch_lut %d\n",
- le32_to_cpu(notif->primary_ch_lut));
- IWL_DEBUG_COEX(mvm, "\tBT secondary_ch_lut %d\n",
- le32_to_cpu(notif->secondary_ch_lut));
- IWL_DEBUG_COEX(mvm, "\tBT activity grading %d\n",
- le32_to_cpu(notif->bt_activity_grading));
- IWL_DEBUG_COEX(mvm, "\tBT agg traffic load %d\n",
- notif->bt_agg_traffic_load);
-
- /* remember this notification for future use: rssi fluctuations */
- memcpy(&mvm->last_bt_notif_old, notif, sizeof(mvm->last_bt_notif_old));
-
- iwl_mvm_bt_coex_notif_handle(mvm);
-}
-
-static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_bt_iterator_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
-
- struct ieee80211_sta *sta;
- struct iwl_mvm_sta *mvmsta;
-
- struct ieee80211_chanctx_conf *chanctx_conf;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- /* If channel context is invalid or not on 2.4GHz - don't count it */
- if (!chanctx_conf ||
- chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ) {
- rcu_read_unlock();
- return;
- }
- rcu_read_unlock();
-
- if (vif->type != NL80211_IFTYPE_STATION ||
- mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
- return;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /* This can happen if the station has been removed right now */
- if (IS_ERR_OR_NULL(sta))
- return;
-
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-}
-
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum ieee80211_rssi_event_data rssi_event)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_bt_iterator_data data = {
- .mvm = mvm,
- };
- int ret;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Ignore updates if we are in force mode */
- if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return;
-
- /*
- * Rssi update while not associated - can happen since the statistics
- * are handled asynchronously
- */
- if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
- return;
-
- /* No BT - reports should be disabled */
- if (!mvm->last_bt_notif_old.bt_status)
- return;
-
- IWL_DEBUG_COEX(mvm, "RSSI for %pM is now %s\n", vif->bss_conf.bssid,
- rssi_event == RSSI_EVENT_HIGH ? "HIGH" : "LOW");
-
- /*
- * Check if rssi is good enough for reduced Tx power, but not in loose
- * scheme.
- */
- if (rssi_event == RSSI_EVENT_LOW || mvm->cfg->bt_shared_single_ant ||
- iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT)
- ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
- false);
- else
- ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id, true);
-
- if (ret)
- IWL_ERR(mvm, "couldn't send BT_CONFIG HCMD upon RSSI event\n");
-
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bt_rssi_iterator, &data);
-
- if (iwl_mvm_bt_udpate_ctrl_kill_msk(mvm))
- IWL_ERR(mvm, "Failed to update the ctrl_kill_msk\n");
-}
-
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
-#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
-
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- enum iwl_bt_coex_lut_type lut_type;
-
- if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
- BT_HIGH_TRAFFIC)
- return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
- if (mvm->last_bt_notif_old.ttc_enabled)
- return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
- lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
-
- if (lut_type == BT_COEX_LOOSE_LUT || lut_type == BT_COEX_INVALID_LUT)
- return LINK_QUAL_AGG_TIME_LIMIT_DEF;
-
- /* tight coex, high bt traffic, reduce AGG time limit */
- return LINK_QUAL_AGG_TIME_LIMIT_BT_ACT;
-}
-
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta)
-{
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- enum iwl_bt_coex_lut_type lut_type;
-
- if (mvm->last_bt_notif_old.ttc_enabled)
- return true;
-
- if (le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading) <
- BT_HIGH_TRAFFIC)
- return true;
-
- /*
- * In Tight / TxTxDis, BT can't Rx while we Tx, so use both antennas
- * since BT is already killed.
- * In Loose, BT can Rx while we Tx, so forbid MIMO to let BT Rx while
- * we Tx.
- * When we are in 5GHz, we'll get BT_COEX_INVALID_LUT allowing MIMO.
- */
- lut_type = iwl_get_coex_type(mvm, mvmsta->vif);
- return lut_type != BT_COEX_LOOSE_LUT;
-}
-
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm)
-{
- u32 ag = le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
- return ag < BT_HIGH_TRAFFIC;
-}
-
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
- enum ieee80211_band band)
-{
- u32 bt_activity =
- le32_to_cpu(mvm->last_bt_notif_old.bt_activity_grading);
-
- if (band != IEEE80211_BAND_2GHZ)
- return false;
-
- return bt_activity >= BT_LOW_TRAFFIC;
-}
-
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm)
-{
- iwl_mvm_bt_coex_notif_handle(mvm);
-}
-
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 ant_isolation = le32_to_cpup((void *)pkt->data);
- u8 __maybe_unused lower_bound, upper_bound;
- u8 lut;
-
- struct iwl_bt_coex_cmd_old *bt_cmd;
- struct iwl_host_cmd cmd = {
- .id = BT_CONFIG,
- .len = { sizeof(*bt_cmd), },
- .dataflags = { IWL_HCMD_DFL_NOCOPY, },
- };
-
- if (!iwl_mvm_bt_is_plcr_supported(mvm))
- return;
-
- lockdep_assert_held(&mvm->mutex);
-
- /* Ignore updates if we are in force mode */
- if (unlikely(mvm->bt_force_ant_mode != BT_FORCE_ANT_DIS))
- return;
-
- if (ant_isolation == mvm->last_ant_isol)
- return;
-
- for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
- if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
- break;
-
- lower_bound = antenna_coupling_ranges[lut].range;
-
- if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
- upper_bound = antenna_coupling_ranges[lut + 1].range;
- else
- upper_bound = antenna_coupling_ranges[lut].range;
-
- IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
- ant_isolation, lower_bound, upper_bound, lut);
-
- mvm->last_ant_isol = ant_isolation;
-
- if (mvm->last_corun_lut == lut)
- return;
-
- mvm->last_corun_lut = lut;
-
- bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
- if (!bt_cmd)
- return;
- cmd.data[0] = bt_cmd;
-
- bt_cmd->flags = cpu_to_le32(BT_COEX_NW_OLD);
- bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
- BT_VALID_CORUN_LUT_20 |
- BT_VALID_CORUN_LUT_40);
-
- /* For the moment, use the same LUT for 20MHz and 40MHz */
- memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
- sizeof(bt_cmd->bt4_corun_lut20));
-
- memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
- sizeof(bt_cmd->bt4_corun_lut40));
-
- if (iwl_mvm_send_cmd(mvm, &cmd))
- IWL_ERR(mvm, "failed to send BT_CONFIG command\n");
-
- kfree(bt_cmd);
-}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index 4b560e441..4eeb6b78d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -75,7 +75,6 @@
#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
-#define IWL_MVM_P2P_UAPSD_STANDALONE 0
#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
@@ -110,6 +109,7 @@
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_SW_TX_CSUM_OFFLOAD 0
+#define IWL_MVM_HW_CSUM_DISABLE 0
#define IWL_MVM_COLLECT_FW_ERR_DUMP 1
#define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE 1
#define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE 2
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index c1a313149..4fdc3dad3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -723,7 +723,7 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
}
- ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false);
+ ret = iwl_mvm_sta_send_to_fw(mvm, ap_sta, false, 0);
if (ret)
return ret;
rcu_assign_pointer(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id], ap_sta);
@@ -1804,7 +1804,6 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct iwl_wowlan_status *fw_status;
int i;
bool keep;
- struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
@@ -1823,13 +1822,10 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
status.wake_packet = fw_status->wake_packet;
/* still at hard-coded place 0 for D3 image */
- ap_sta = rcu_dereference_protected(
- mvm->fw_id_to_mac_id[0],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(ap_sta))
+ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
+ if (!mvm_ap_sta)
goto out_free;
- mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = status.qos_seq_ctr[i];
/* firmware stores last-used value, we store next value */
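
Both this hunk and the debugfs one below replace the open-coded rcu_dereference_protected() lookup with iwl_mvm_sta_from_staid_protected(). Judging from the code it replaces, the helper presumably folds the lookup, the error check, and the mac80211-to-mvm conversion together, roughly:

    static inline struct iwl_mvm_sta *
    iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
    {
        struct ieee80211_sta *sta =
            rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
                                      lockdep_is_held(&mvm->mutex));

        if (IS_ERR_OR_NULL(sta))
            return NULL;    /* callers now test for NULL, as above */
        return iwl_mvm_sta_from_mac80211(sta);
    }
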
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 14004456b..b23271755 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -281,13 +281,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
if (vif->type == NL80211_IFTYPE_STATION &&
ap_sta_id != IWL_MVM_STATION_COUNT) {
- struct ieee80211_sta *sta;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[ap_sta_id],
- lockdep_is_held(&mvm->mutex));
- if (!IS_ERR_OR_NULL(sta)) {
- struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_sta *mvm_sta;
+ mvm_sta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
+ if (mvm_sta) {
pos += scnprintf(buf+pos, bufsz-pos,
"ap_sta_id %d - reduced Tx power %d\n",
ap_sta_id,
@@ -724,9 +721,9 @@ static ssize_t iwl_dbgfs_tof_responder_params_write(struct ieee80211_vif *vif,
ret = kstrtou32(data, 10, &value);
if (ret == 0 && value) {
- enum ieee80211_band band = (cmd->channel_num <= 14) ?
- IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
+ enum nl80211_band band = (cmd->channel_num <= 14) ?
+ NL80211_BAND_2GHZ :
+ NL80211_BAND_5GHZ;
struct ieee80211_channel chn = {
.band = band,
.center_freq = ieee80211_channel_to_frequency(
@@ -1425,6 +1422,89 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
+static const char * const chanwidths[] = {
+ [NL80211_CHAN_WIDTH_20_NOHT] = "noht",
+ [NL80211_CHAN_WIDTH_20] = "ht20",
+ [NL80211_CHAN_WIDTH_40] = "ht40",
+ [NL80211_CHAN_WIDTH_80] = "vht80",
+ [NL80211_CHAN_WIDTH_80P80] = "vht80p80",
+ [NL80211_CHAN_WIDTH_160] = "vht160",
+};
+
+static bool iwl_mvm_lqm_notif_wait(struct iwl_notif_wait_data *notif_wait,
+ struct iwl_rx_packet *pkt, void *data)
+{
+ struct ieee80211_vif *vif = data;
+ struct iwl_mvm *mvm =
+ container_of(notif_wait, struct iwl_mvm, notif_wait);
+ struct iwl_link_qual_msrmnt_notif *report = (void *)pkt->data;
+ u32 num_of_stations = le32_to_cpu(report->number_of_stations);
+ int i;
+
+ IWL_INFO(mvm, "LQM report:\n");
+ IWL_INFO(mvm, "\tstatus: %d\n", report->status);
+ IWL_INFO(mvm, "\tmacID: %d\n", le32_to_cpu(report->mac_id));
+ IWL_INFO(mvm, "\ttx_frame_dropped: %d\n",
+ le32_to_cpu(report->tx_frame_dropped));
+ IWL_INFO(mvm, "\ttime_in_measurement_window: %d us\n",
+ le32_to_cpu(report->time_in_measurement_window));
+ IWL_INFO(mvm, "\ttotal_air_time_other_stations: %d\n",
+ le32_to_cpu(report->total_air_time_other_stations));
+ IWL_INFO(mvm, "\tchannel_freq: %d\n",
+ vif->bss_conf.chandef.center_freq1);
+ IWL_INFO(mvm, "\tchannel_width: %s\n",
+ chanwidths[vif->bss_conf.chandef.width]);
+ IWL_INFO(mvm, "\tnumber_of_stations: %d\n", num_of_stations);
+ for (i = 0; i < num_of_stations; i++)
+ IWL_INFO(mvm, "\t\tsta[%d]: %d\n", i,
+ report->frequent_stations_air_time[i]);
+
+ return true;
+}
+
+static ssize_t iwl_dbgfs_lqm_send_cmd_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_notification_wait wait_lqm_notif;
+ static u16 lqm_notif[] = {
+ WIDE_ID(MAC_CONF_GROUP,
+ LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF)
+ };
+ int err;
+ u32 duration;
+ u32 timeout;
+
+ if (sscanf(buf, "%d,%d", &duration, &timeout) != 2)
+ return -EINVAL;
+
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_lqm_notif,
+ lqm_notif, ARRAY_SIZE(lqm_notif),
+ iwl_mvm_lqm_notif_wait, vif);
+ mutex_lock(&mvm->mutex);
+ err = iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
+ duration, timeout);
+ mutex_unlock(&mvm->mutex);
+
+ if (err) {
+ IWL_ERR(mvm, "Failed to send lqm cmd (err=%d)\n", err);
+ iwl_remove_notification(&mvm->notif_wait, &wait_lqm_notif);
+ return err;
+ }
+
+ /* wait for 2 * timeout (safety guard) and convert to jiffies */
+ timeout = msecs_to_jiffies((timeout * 2) / 1000);
+
+ err = iwl_wait_notification(&mvm->notif_wait, &wait_lqm_notif,
+ timeout);
+ if (err)
+ IWL_ERR(mvm, "Getting lqm notif timed out\n");
+
+ return count;
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1449,6 +1529,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_range_abort, 32);
MVM_DEBUGFS_READ_FILE_OPS(tof_range_response);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(tof_responder_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
+MVM_DEBUGFS_WRITE_FILE_OPS(lqm_send_cmd, 64);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -1488,6 +1569,7 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir,
S_IRUSR | S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(lqm_send_cmd, mvmvif->dbgfs_dir, S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
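
The new lqm_send_cmd hook takes a "duration,timeout" pair and blocks on the completion notification. The division by 1000 before msecs_to_jiffies() suggests the timeout is supplied in microseconds; under that assumption the safety guard works out as in this sketch:

    /* Sketch only: double the user's timeout as a safety margin and
     * convert microseconds to milliseconds before the jiffies
     * conversion. msecs_to_jiffies() is the real kernel helper. */
    static unsigned long lqm_wait_jiffies(unsigned int timeout_usec)
    {
        unsigned int msec = (timeout_usec * 2) / 1000; /* 2x guard, usec -> msec */

        return msecs_to_jiffies(msec);
    }
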
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a43b3921c..406cf1cb9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -65,6 +65,7 @@
*****************************************************************************/
#include <linux/vmalloc.h>
#include <linux/ieee80211.h>
+#include <linux/netdevice.h>
#include "mvm.h"
#include "fw-dbg.h"
@@ -463,69 +464,11 @@ int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
return pos;
}
-static
-int iwl_mvm_coex_dump_mbox_old(struct iwl_bt_coex_profile_notif_old *notif,
- char *buf, int pos, int bufsz)
-{
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
-
- BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
- BT_MBOX_PRINT(0, LE_PROF1, false);
- BT_MBOX_PRINT(0, LE_PROF2, false);
- BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
- BT_MBOX_PRINT(0, CHL_SEQ_N, false);
- BT_MBOX_PRINT(0, INBAND_S, false);
- BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
- BT_MBOX_PRINT(0, LE_SCAN, false);
- BT_MBOX_PRINT(0, LE_ADV, false);
- BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
- BT_MBOX_PRINT(0, OPEN_CON_1, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
-
- BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
- BT_MBOX_PRINT(1, IP_SR, false);
- BT_MBOX_PRINT(1, LE_MSTR, false);
- BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
- BT_MBOX_PRINT(1, MSG_TYPE, false);
- BT_MBOX_PRINT(1, SSN, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
-
- BT_MBOX_PRINT(2, SNIFF_ACT, false);
- BT_MBOX_PRINT(2, PAG, false);
- BT_MBOX_PRINT(2, INQUIRY, false);
- BT_MBOX_PRINT(2, CONN, false);
- BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
- BT_MBOX_PRINT(2, DISC, false);
- BT_MBOX_PRINT(2, SCO_TX_ACT, false);
- BT_MBOX_PRINT(2, SCO_RX_ACT, false);
- BT_MBOX_PRINT(2, ESCO_RE_TX, false);
- BT_MBOX_PRINT(2, SCO_DURATION, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
-
- BT_MBOX_PRINT(3, SCO_STATE, false);
- BT_MBOX_PRINT(3, SNIFF_STATE, false);
- BT_MBOX_PRINT(3, A2DP_STATE, false);
- BT_MBOX_PRINT(3, ACL_STATE, false);
- BT_MBOX_PRINT(3, MSTR_STATE, false);
- BT_MBOX_PRINT(3, OBX_STATE, false);
- BT_MBOX_PRINT(3, OPEN_CON_2, false);
- BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
- BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
- BT_MBOX_PRINT(3, INBAND_P, false);
- BT_MBOX_PRINT(3, MSG_TYPE_2, false);
- BT_MBOX_PRINT(3, SSN_2, false);
- BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
-
- return pos;
-}
-
static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
char *buf;
int ret, pos = 0, bufsz = sizeof(char) * 1024;
@@ -535,52 +478,24 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
mutex_lock(&mvm->mutex);
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- struct iwl_bt_coex_profile_notif_old *notif =
- &mvm->last_bt_notif_old;
-
- pos += iwl_mvm_coex_dump_mbox_old(notif, buf, pos, bufsz);
-
- pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
- notif->bt_ci_compliance);
- pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
- le32_to_cpu(notif->primary_ch_lut));
- pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
- le32_to_cpu(notif->secondary_ch_lut));
- pos += scnprintf(buf+pos,
- bufsz-pos, "bt_activity_grading = %d\n",
- le32_to_cpu(notif->bt_activity_grading));
- pos += scnprintf(buf+pos, bufsz-pos,
- "antenna isolation = %d CORUN LUT index = %d\n",
- mvm->last_ant_isol, mvm->last_corun_lut);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
- notif->rrc_enabled);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
- notif->ttc_enabled);
- } else {
- struct iwl_bt_coex_profile_notif *notif =
- &mvm->last_bt_notif;
-
- pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
-
- pos += scnprintf(buf+pos, bufsz-pos, "bt_ci_compliance = %d\n",
- notif->bt_ci_compliance);
- pos += scnprintf(buf+pos, bufsz-pos, "primary_ch_lut = %d\n",
- le32_to_cpu(notif->primary_ch_lut));
- pos += scnprintf(buf+pos, bufsz-pos, "secondary_ch_lut = %d\n",
- le32_to_cpu(notif->secondary_ch_lut));
- pos += scnprintf(buf+pos,
- bufsz-pos, "bt_activity_grading = %d\n",
- le32_to_cpu(notif->bt_activity_grading));
- pos += scnprintf(buf+pos, bufsz-pos,
- "antenna isolation = %d CORUN LUT index = %d\n",
- mvm->last_ant_isol, mvm->last_corun_lut);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
- (notif->ttc_rrc_status >> 4) & 0xF);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
- notif->ttc_rrc_status & 0xF);
- }
+ pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
+ notif->bt_ci_compliance);
+ pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
+ le32_to_cpu(notif->primary_ch_lut));
+ pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
+ le32_to_cpu(notif->secondary_ch_lut));
+ pos += scnprintf(buf + pos,
+ bufsz - pos, "bt_activity_grading = %d\n",
+ le32_to_cpu(notif->bt_activity_grading));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "antenna isolation = %d CORUN LUT index = %d\n",
+ mvm->last_ant_isol, mvm->last_corun_lut);
+ pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
+ (notif->ttc_rrc_status >> 4) & 0xF);
+ pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
+ notif->ttc_rrc_status & 0xF);
pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
IWL_MVM_BT_COEX_SYNC2SCO);
@@ -602,44 +517,20 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
char buf[256];
int bufsz = sizeof(buf);
int pos = 0;
mutex_lock(&mvm->mutex);
- if (!fw_has_api(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_API_BT_COEX_SPLIT)) {
- struct iwl_bt_coex_ci_cmd_old *cmd = &mvm->last_bt_ci_cmd_old;
-
- pos += scnprintf(buf+pos, bufsz-pos,
- "Channel inhibition CMD\n");
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tPrimary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_primary_ci));
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tSecondary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_secondary_ci));
-
- pos += scnprintf(buf+pos, bufsz-pos,
- "BT Configuration CMD - 0=default, 1=never, 2=always\n");
- pos += scnprintf(buf+pos, bufsz-pos, "\tACK Kill msk idx %d\n",
- mvm->bt_ack_kill_msk[0]);
- pos += scnprintf(buf+pos, bufsz-pos, "\tCTS Kill msk idx %d\n",
- mvm->bt_cts_kill_msk[0]);
-
- } else {
- struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
-
- pos += scnprintf(buf+pos, bufsz-pos,
- "Channel inhibition CMD\n");
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tPrimary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_primary_ci));
- pos += scnprintf(buf+pos, bufsz-pos,
- "\tSecondary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_secondary_ci));
- }
+ pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tPrimary Channel Bitmap 0x%016llx\n",
+ le64_to_cpu(cmd->bt_primary_ci));
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tSecondary Channel Bitmap 0x%016llx\n",
+ le64_to_cpu(cmd->bt_secondary_ci));
mutex_unlock(&mvm->mutex);
@@ -990,8 +881,10 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
struct iwl_rss_config_cmd cmd = {
.flags = cpu_to_le32(IWL_RSS_ENABLE),
.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+ IWL_RSS_HASH_TYPE_IPV4_UDP |
IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
IWL_RSS_HASH_TYPE_IPV6_TCP |
+ IWL_RSS_HASH_TYPE_IPV6_UDP |
IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
};
int ret, i, num_repeats, nbytes = count / 2;
@@ -1015,7 +908,7 @@ static ssize_t iwl_dbgfs_indirection_tbl_write(struct iwl_mvm *mvm,
memcpy(&cmd.indirection_table[i * nbytes], cmd.indirection_table,
ARRAY_SIZE(cmd.indirection_table) % nbytes);
- memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+ netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
@@ -1416,6 +1309,8 @@ static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
PRINT_MVM_REF(IWL_MVM_REF_PROTECT_CSA);
PRINT_MVM_REF(IWL_MVM_REF_FW_DBG_COLLECT);
PRINT_MVM_REF(IWL_MVM_REF_INIT_UCODE);
+ PRINT_MVM_REF(IWL_MVM_REF_SENDING_CMD);
+ PRINT_MVM_REF(IWL_MVM_REF_RX);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
index eec52c57f..5f22cc7ac 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-d3.h
@@ -368,7 +368,7 @@ struct iwl_wowlan_gtk_status {
u8 decrypt_key[16];
u8 tkip_mic_key[8];
struct iwl_wowlan_rsc_tsc_params_cmd rsc;
-} __packed;
+} __packed; /* WOWLAN_GTK_MATERIAL_VER_1 */
struct iwl_wowlan_status {
struct iwl_wowlan_gtk_status gtk;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
index 7a16e55df..1ca8e4988 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rx.h
@@ -268,12 +268,25 @@ enum iwl_rx_mpdu_amsdu_info {
IWL_RX_MPDU_AMSDU_LAST_SUBFRAME = 0x80,
};
+enum iwl_rx_l3_proto_values {
+ IWL_RX_L3_TYPE_NONE,
+ IWL_RX_L3_TYPE_IPV4,
+ IWL_RX_L3_TYPE_IPV4_FRAG,
+ IWL_RX_L3_TYPE_IPV6_FRAG,
+ IWL_RX_L3_TYPE_IPV6,
+ IWL_RX_L3_TYPE_IPV6_IN_IPV4,
+ IWL_RX_L3_TYPE_ARP,
+ IWL_RX_L3_TYPE_EAPOL,
+};
+
+#define IWL_RX_L3_PROTO_POS 4
+
enum iwl_rx_l3l4_flags {
IWL_RX_L3L4_IP_HDR_CSUM_OK = BIT(0),
IWL_RX_L3L4_TCP_UDP_CSUM_OK = BIT(1),
IWL_RX_L3L4_TCP_FIN_SYN_RST_PSH = BIT(2),
IWL_RX_L3L4_TCP_ACK = BIT(3),
- IWL_RX_L3L4_L3_PROTO_MASK = 0xf << 4,
+ IWL_RX_L3L4_L3_PROTO_MASK = 0xf << IWL_RX_L3_PROTO_POS,
IWL_RX_L3L4_L4_PROTO_MASK = 0xf << 8,
IWL_RX_L3L4_RSS_HASH_MASK = 0xf << 12,
};
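
As an aside, the new IWL_RX_L3_PROTO_POS define makes the decode of the L3 type explicit. A minimal sketch, not part of this patch, of how a consumer holding the little-endian l3l4_flags word could recover an enum iwl_rx_l3_proto_values entry (the function name is illustrative):

static u8 example_rx_l3_proto(__le16 l3l4_flags)
{
	u16 flags = le16_to_cpu(l3l4_flags);

	/* bits 4..7 carry the L3 type; mask and shift down to get an
	 * enum iwl_rx_l3_proto_values value
	 */
	return (flags & IWL_RX_L3L4_L3_PROTO_MASK) >> IWL_RX_L3_PROTO_POS;
}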
@@ -424,21 +437,28 @@ struct iwl_rxq_sync_notification {
/**
* Internal message identifier
*
+* @IWL_MVM_RXQ_EMPTY: empty sync notification
* @IWL_MVM_RXQ_NOTIF_DEL_BA: notify RSS queues of delBA
*/
enum iwl_mvm_rxq_notif_type {
+ IWL_MVM_RXQ_EMPTY,
IWL_MVM_RXQ_NOTIF_DEL_BA,
};
/**
* struct iwl_mvm_internal_rxq_notif - Internal representation of the data sent
* in &iwl_rxq_sync_cmd. Should be DWORD aligned.
+* FW is agnostic to the payload, so there are no endianness requirements.
*
* @type: value from &iwl_mvm_rxq_notif_type
+* @sync: ctrl path is waiting for all notifications to be received
+* @cookie: internal cookie to identify old notifications
* @data: payload
*/
struct iwl_mvm_internal_rxq_notif {
- u32 type;
+ u16 type;
+ u16 sync;
+ u32 cookie;
u8 data[];
} __packed;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
index 90d911394..38b1d045b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-sta.h
@@ -173,7 +173,7 @@ enum iwl_sta_key_flag {
/**
 * enum iwl_sta_modify_flag - indicate to the fw what flags are being changed
- * @STA_MODIFY_KEY: this command modifies %key
+ * @STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
* @STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
* @STA_MODIFY_TX_RATE: unused
* @STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
@@ -183,7 +183,7 @@ enum iwl_sta_key_flag {
* @STA_MODIFY_QUEUES: modify the queues used by this station
*/
enum iwl_sta_modify_flag {
- STA_MODIFY_KEY = BIT(0),
+ STA_MODIFY_QUEUE_REMOVAL = BIT(0),
STA_MODIFY_TID_DISABLE_TX = BIT(1),
STA_MODIFY_TX_RATE = BIT(2),
STA_MODIFY_ADD_BA_TID = BIT(3),
@@ -255,8 +255,10 @@ struct iwl_mvm_keyinfo {
__le64 hw_tkip_mic_tx_key;
} __packed;
-#define IWL_ADD_STA_STATUS_MASK 0xFF
-#define IWL_ADD_STA_BAID_MASK 0xFF00
+#define IWL_ADD_STA_STATUS_MASK 0xFF
+#define IWL_ADD_STA_BAID_VALID_MASK 0x8000
+#define IWL_ADD_STA_BAID_MASK 0x7F00
+#define IWL_ADD_STA_BAID_SHIFT 8
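
The BAID field is now a 7-bit index (bits 8..14) qualified by a validity bit (bit 15), rather than a full byte. A hedged sketch of the decode these defines imply, assuming status is the add-sta response word (the helper name is hypothetical):

static int example_baid_from_status(u32 status)
{
	/* bit 15 indicates whether the BAID field is meaningful at all */
	if (!(status & IWL_ADD_STA_BAID_VALID_MASK))
		return -ENOENT;

	/* bits 8..14 carry the BAID itself */
	return (status & IWL_ADD_STA_BAID_MASK) >> IWL_ADD_STA_BAID_SHIFT;
}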
/**
* struct iwl_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
index ba3f0bbdd..dadcccd88 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h
@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -193,11 +194,41 @@ enum iwl_tx_pm_timeouts {
#define IWL_BAR_DFAULT_RETRY_LIMIT 60
#define IWL_LOW_RETRY_LIMIT 7
+/**
+ * enum iwl_tx_offload_assist_flags_pos - set %iwl_tx_cmd offload_assist values
+ * @TX_CMD_OFFLD_IP_HDR: offset to the start of the IP header (in words)
+ * from the end of the mac header. In the normal case this is 4 words,
+ * covering the SNAP header.
+ * note: tx_cmd, mac header and pad are not counted in the offset.
+ * This is used to help the offload when there is tunneling, such as
+ * IPv6 in IPv4; in that case the IP header offset should point to the
+ * inner IP header, and the IPv4 checksum of the outer header should be
+ * calculated by the driver.
+ * @TX_CMD_OFFLD_L4_EN: enable TCP/UDP checksum
+ * @TX_CMD_OFFLD_L3_EN: enable IP header checksum
+ * @TX_CMD_OFFLD_MH_SIZE: size of the mac header in words. Includes the IV
+ * field. Doesn't include the pad.
+ * @TX_CMD_OFFLD_PAD: mark that a 2-byte pad was inserted after the mac
+ * header for alignment
+ * @TX_CMD_OFFLD_AMSDU: mark that the TX command carries an A-MSDU
+ */
+enum iwl_tx_offload_assist_flags_pos {
+ TX_CMD_OFFLD_IP_HDR = 0,
+ TX_CMD_OFFLD_L4_EN = 6,
+ TX_CMD_OFFLD_L3_EN = 7,
+ TX_CMD_OFFLD_MH_SIZE = 8,
+ TX_CMD_OFFLD_PAD = 13,
+ TX_CMD_OFFLD_AMSDU = 14,
+};
+
+#define IWL_TX_CMD_OFFLD_MH_MASK 0x1f
+#define IWL_TX_CMD_OFFLD_IP_HDR_MASK 0x3f
+
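
Taken together, the positions and masks describe a packed 16-bit field. A sketch of how a TX path might assemble it for a checksummed, padded frame, under the assumption that the caller already knows the IP header offset and mac header length in words (all names here are illustrative, not from the patch):

static __le16 example_offload_assist(u16 ip_off_words, u16 mh_len_words,
				     bool pad)
{
	u16 val = 0;

	/* low 6 bits: offset to the (inner) IP header, in words */
	val |= (ip_off_words & IWL_TX_CMD_OFFLD_IP_HDR_MASK) <<
		TX_CMD_OFFLD_IP_HDR;
	/* 5-bit mac header size, in words, starting at bit 8 */
	val |= (mh_len_words & IWL_TX_CMD_OFFLD_MH_MASK) <<
		TX_CMD_OFFLD_MH_SIZE;
	/* enable both IP-header and TCP/UDP checksum offload */
	val |= BIT(TX_CMD_OFFLD_L3_EN) | BIT(TX_CMD_OFFLD_L4_EN);
	if (pad)
		val |= BIT(TX_CMD_OFFLD_PAD);

	return cpu_to_le16(val);
}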
/* TODO: complete documentation for try_cnt and btkill_cnt */
/**
* struct iwl_tx_cmd - TX command struct to FW
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
+ * @offload_assist: TX offload configuration
* @tx_flags: combination of TX_CMD_FLG_*
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
@@ -231,7 +262,7 @@ enum iwl_tx_pm_timeouts {
*/
struct iwl_tx_cmd {
__le16 len;
- __le16 next_frame_len;
+ __le16 offload_assist;
__le32 tx_flags;
struct {
u8 try_cnt;
@@ -255,7 +286,7 @@ struct iwl_tx_cmd {
__le16 reserved4;
u8 payload[0];
struct ieee80211_hdr hdr[0];
-} __packed; /* TX_CMD_API_S_VER_3 */
+} __packed; /* TX_CMD_API_S_VER_6 */
/*
* TX response related data
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
index 4a0fc47c8..41b80ae2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api.h
@@ -80,12 +80,44 @@
#include "fw-api-stats.h"
#include "fw-api-tof.h"
-/* Tx queue numbers */
+/* Tx queue numbers for non-DQA mode */
enum {
IWL_MVM_OFFCHANNEL_QUEUE = 8,
IWL_MVM_CMD_QUEUE = 9,
};
+/*
+ * DQA queue numbers
+ *
+ * @IWL_MVM_DQA_CMD_QUEUE: a queue reserved for sending HCMDs to the FW
+ * @IWL_MVM_DQA_P2P_DEVICE_QUEUE: a queue reserved for P2P device frames
+ * @IWL_MVM_DQA_GCAST_QUEUE: a queue reserved for P2P GO/SoftAP GCAST frames
+ * @IWL_MVM_DQA_BSS_CLIENT_QUEUE: a queue reserved for BSS activity, to ensure
+ * that we are never left without the possibility to connect to an AP.
+ * @IWL_MVM_DQA_MIN_MGMT_QUEUE: first TXQ in pool for MGMT and non-QOS frames.
+ * Each MGMT queue is mapped to a single STA.
+ * MGMT frames are frames that return true on ieee80211_is_mgmt()
+ * @IWL_MVM_DQA_MAX_MGMT_QUEUE: last TXQ in pool for MGMT frames
+ * @IWL_MVM_DQA_AP_PROBE_RESP_QUEUE: a queue reserved for P2P GO/SoftAP probe
+ * responses
+ * @IWL_MVM_DQA_MIN_DATA_QUEUE: first TXQ in pool for DATA frames.
+ * DATA frames are intended for !ieee80211_is_mgmt() frames, but if
+ * the MGMT TXQ pool is exhausted, mgmt frames can be sent on DATA queues
+ * as well
+ * @IWL_MVM_DQA_MAX_DATA_QUEUE: last TXQ in pool for DATA frames
+ */
+enum iwl_mvm_dqa_txq {
+ IWL_MVM_DQA_CMD_QUEUE = 0,
+ IWL_MVM_DQA_P2P_DEVICE_QUEUE = 2,
+ IWL_MVM_DQA_GCAST_QUEUE = 3,
+ IWL_MVM_DQA_BSS_CLIENT_QUEUE = 4,
+ IWL_MVM_DQA_MIN_MGMT_QUEUE = 5,
+ IWL_MVM_DQA_MAX_MGMT_QUEUE = 8,
+ IWL_MVM_DQA_AP_PROBE_RESP_QUEUE = 9,
+ IWL_MVM_DQA_MIN_DATA_QUEUE = 10,
+ IWL_MVM_DQA_MAX_DATA_QUEUE = 31,
+};
+
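The enum partitions the DQA queue space into fixed single-purpose queues (0-4, 9) and two dynamically allocated pools. Hypothetical range helpers, shown only to make the layout concrete:

static inline bool example_is_dqa_mgmt_queue(u8 queue)
{
	/* queues 5..8: per-STA MGMT/non-QoS pool */
	return queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
	       queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE;
}

static inline bool example_is_dqa_data_queue(u8 queue)
{
	/* queues 10..31: DATA pool (also takes MGMT overflow) */
	return queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
	       queue <= IWL_MVM_DQA_MAX_DATA_QUEUE;
}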
enum iwl_mvm_tx_fifo {
IWL_MVM_TX_FIFO_BK = 0,
IWL_MVM_TX_FIFO_BE,
@@ -279,6 +311,11 @@ enum {
/* Please keep this enum *SORTED* by hex value.
* Needed for binary search, otherwise a warning will be triggered.
*/
+enum iwl_mac_conf_subcmd_ids {
+ LINK_QUALITY_MEASUREMENT_CMD = 0x1,
+ LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF = 0xFE,
+};
+
enum iwl_phy_ops_subcmd_ids {
CMD_DTS_MEASUREMENT_TRIGGER_WIDE = 0x0,
CTDP_CONFIG_CMD = 0x03,
@@ -287,6 +324,10 @@ enum iwl_phy_ops_subcmd_ids {
DTS_MEASUREMENT_NOTIF_WIDE = 0xFF,
};
+enum iwl_system_subcmd_ids {
+ SHARED_MEM_CFG_CMD = 0x0,
+};
+
enum iwl_data_path_subcmd_ids {
UPDATE_MU_GROUPS_CMD = 0x1,
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
@@ -302,6 +343,8 @@ enum iwl_prot_offload_subcmd_ids {
enum {
LEGACY_GROUP = 0x0,
LONG_GROUP = 0x1,
+ SYSTEM_GROUP = 0x2,
+ MAC_CONF_GROUP = 0x3,
PHY_OPS_GROUP = 0x4,
DATA_PATH_GROUP = 0x5,
PROT_OFFLOAD_GROUP = 0xb,
@@ -1923,6 +1966,7 @@ struct iwl_tdls_config_res {
#define TX_FIFO_MAX_NUM 8
#define RX_FIFO_MAX_NUM 2
+#define TX_FIFO_INTERNAL_MAX_NUM 6
/**
* Shared memory configuration information from the FW
@@ -1940,6 +1984,12 @@ struct iwl_tdls_config_res {
* @page_buff_addr: used by UMAC and performance debug (page miss analysis),
* when paging is not supported this should be 0
* @page_buff_size: size of %page_buff_addr
+ * @rxfifo_addr: Start address of rxFifo
+ * @internal_txfifo_addr: start address of internalFifo
+ * @internal_txfifo_size: internal fifos' size
+ *
+ * NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
+ * set, the last 3 members don't exist.
*/
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
@@ -1951,7 +2001,10 @@ struct iwl_shared_mem_cfg {
__le32 rxfifo_size[RX_FIFO_MAX_NUM];
__le32 page_buff_addr;
__le32 page_buff_size;
-} __packed; /* SHARED_MEM_ALLOC_API_S_VER_1 */
+ __le32 rxfifo_addr;
+ __le32 internal_txfifo_addr;
+ __le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
/**
* VHT MU-MIMO group configuration
@@ -2002,4 +2055,60 @@ struct iwl_stored_beacon_notif {
u8 data[MAX_STORED_BEACON_SIZE];
} __packed; /* WOWLAN_STROED_BEACON_INFO_S_VER_1 */
+#define LQM_NUMBER_OF_STATIONS_IN_REPORT 16
+
+enum iwl_lqm_cmd_operatrions {
+ LQM_CMD_OPERATION_START_MEASUREMENT = 0x01,
+ LQM_CMD_OPERATION_STOP_MEASUREMENT = 0x02,
+};
+
+enum iwl_lqm_status {
+ LQM_STATUS_SUCCESS = 0,
+ LQM_STATUS_TIMEOUT = 1,
+ LQM_STATUS_ABORT = 2,
+};
+
+/**
+ * Link Quality Measurement command
+ * @cmd_operation: command operation to be performed (start or stop),
+ * as defined above.
+ * @mac_id: MAC ID the measurement applies to.
+ * @measurement_time: time of the total measurement to be performed, in uSec.
+ * @timeout: maximum time allowed until a response is sent, in uSec.
+ */
+struct iwl_link_qual_msrmnt_cmd {
+ __le32 cmd_operation;
+ __le32 mac_id;
+ __le32 measurement_time;
+ __le32 timeout;
+} __packed /* LQM_CMD_API_S_VER_1 */;
+
+/**
+ * Link Quality Measurement notification
+ *
+ * @frequent_stations_air_time: an array containing the total air time
+ * (in uSec) used by the most frequently transmitting stations.
+ * @number_of_stations: the number of unique stations included in the array
+ * (a number between 0 and 16)
+ * @total_air_time_other_stations: the total air time (uSec) used by all the
+ * stations which are not included in the above report.
+ * @time_in_measurement_window: the total time in uSec in which a measurement
+ * took place.
+ * @tx_frame_dropped: the number of TX frames dropped due to retry limit during
+ * measurement
+ * @mac_id: MAC ID the measurement applies to.
+ * @status: return status; may be one of the LQM_STATUS_* values defined above.
+ * @reserved: reserved.
+ */
+struct iwl_link_qual_msrmnt_notif {
+ __le32 frequent_stations_air_time[LQM_NUMBER_OF_STATIONS_IN_REPORT];
+ __le32 number_of_stations;
+ __le32 total_air_time_other_stations;
+ __le32 time_in_measurement_window;
+ __le32 tx_frame_dropped;
+ __le32 mac_id;
+ __le32 status;
+ __le32 reserved[3];
+} __packed; /* LQM_MEASUREMENT_COMPLETE_NTF_API_S_VER1 */
+
#endif /* __fw_api_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
index 6938cd37b..e1b6b2c66 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
@@ -7,7 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,7 +71,7 @@
#include "iwl-csr.h"
static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
- const void *data, size_t datalen)
+ void *data, size_t datalen)
{
const struct iwl_mvm_dump_ptrs *dump_ptrs = data;
ssize_t bytes_read;
@@ -104,7 +104,7 @@ static ssize_t iwl_mvm_read_coredump(char *buffer, loff_t offset, size_t count,
return bytes_read + bytes_read_trans;
}
-static void iwl_mvm_free_coredump(const void *data)
+static void iwl_mvm_free_coredump(void *data)
{
const struct iwl_mvm_dump_ptrs *fw_error_dump = data;
@@ -265,6 +265,66 @@ static void iwl_mvm_dump_fifos(struct iwl_mvm *mvm,
*dump_data = iwl_fw_error_next_data(*dump_data);
}
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+ /* Pull UMAC internal TXF data from all TXFs */
+ for (i = 0;
+ i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i++) {
+ fifo_hdr = (void *)(*dump_data)->data;
+ fifo_data = (void *)fifo_hdr->data;
+ fifo_len = mvm->shared_mem_cfg.internal_txfifo_size[i];
+
+ /* No need to try to read the data if the length is 0 */
+ if (fifo_len == 0)
+ continue;
+
+ /* Add a TLV for the internal FIFOs */
+ (*dump_data)->type =
+ cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF);
+ (*dump_data)->len =
+ cpu_to_le32(fifo_len + sizeof(*fifo_hdr));
+
+ fifo_hdr->fifo_num = cpu_to_le32(i);
+
+ /* Mark the number of TXF we're pulling now */
+ iwl_trans_write_prph(mvm->trans, TXF_CPU2_NUM, i);
+
+ fifo_hdr->available_bytes =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_FIFO_ITEM_CNT));
+ fifo_hdr->wr_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_WR_PTR));
+ fifo_hdr->rd_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_RD_PTR));
+ fifo_hdr->fence_ptr =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_FENCE_PTR));
+ fifo_hdr->fence_mode =
+ cpu_to_le32(iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_LOCK_FENCE));
+
+ /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
+ iwl_trans_write_prph(mvm->trans,
+ TXF_CPU2_READ_MODIFY_ADDR,
+ TXF_CPU2_WR_PTR);
+
+ /* Dummy-read to advance the read pointer to head */
+ iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_READ_MODIFY_DATA);
+
+ /* Read FIFO */
+ fifo_len /= sizeof(u32); /* Size in DWORDS */
+ for (j = 0; j < fifo_len; j++)
+ fifo_data[j] =
+ iwl_trans_read_prph(mvm->trans,
+ TXF_CPU2_READ_MODIFY_DATA);
+ *dump_data = iwl_fw_error_next_data(*dump_data);
+ }
+ }
+
iwl_trans_release_nic_access(mvm->trans, &flags);
}
@@ -280,9 +340,11 @@ void iwl_mvm_free_fw_dump_desc(struct iwl_mvm *mvm)
#define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
#define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
-static const struct {
+struct iwl_prph_range {
u32 start, end;
-} iwl_prph_dump_addr[] = {
+};
+
+static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
{ .start = 0x00a00000, .end = 0x00a00000 },
{ .start = 0x00a0000c, .end = 0x00a00024 },
{ .start = 0x00a0002c, .end = 0x00a0003c },
@@ -380,8 +442,18 @@ static const struct {
{ .start = 0x00a44000, .end = 0x00a7bf80 },
};
+static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
+ { .start = 0x00a05c00, .end = 0x00a05c18 },
+ { .start = 0x00a05400, .end = 0x00a056e8 },
+ { .start = 0x00a08000, .end = 0x00a098bc },
+ { .start = 0x00adfc00, .end = 0x00adfd1c },
+ { .start = 0x00a02400, .end = 0x00a02758 },
+};
+
static u32 iwl_dump_prph(struct iwl_trans *trans,
- struct iwl_fw_error_dump_data **data)
+ struct iwl_fw_error_dump_data **data,
+ const struct iwl_prph_range *iwl_prph_dump_addr,
+ u32 range_len)
{
struct iwl_fw_error_dump_prph *prph;
unsigned long flags;
@@ -390,7 +462,7 @@ static u32 iwl_dump_prph(struct iwl_trans *trans,
if (!iwl_trans_grab_nic_access(trans, &flags))
return 0;
- for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+ for (i = 0; i < range_len; i++) {
/* The range includes both boundaries */
int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
iwl_prph_dump_addr[i].start + 4;
@@ -429,9 +501,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct iwl_fw_error_dump_trigger_desc *dump_trig;
struct iwl_mvm_dump_ptrs *fw_error_dump;
u32 sram_len, sram_ofs;
+ struct iwl_fw_dbg_mem_seg_tlv * const *fw_dbg_mem =
+ mvm->fw->dbg_mem_tlv;
u32 file_len, fifo_data_len = 0, prph_len = 0, radio_len = 0;
- u32 smem_len = mvm->cfg->smem_len;
- u32 sram2_len = mvm->cfg->dccm2_len;
+ u32 smem_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->smem_len;
+ u32 sram2_len = mvm->fw->dbg_dynamic_mem ? 0 : mvm->cfg->dccm2_len;
bool monitor_dump_only = false;
int i;
@@ -494,24 +568,54 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sizeof(struct iwl_fw_error_dump_fifo);
}
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+ for (i = 0;
+ i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
+ i++) {
+ if (!mem_cfg->internal_txfifo_size[i])
+ continue;
+
+ /* Add header info */
+ fifo_data_len +=
+ mem_cfg->internal_txfifo_size[i] +
+ sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_fifo);
+ }
+ }
+
/* Make room for PRPH registers */
- for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
+ for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) {
/* The range includes both boundaries */
- int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
- iwl_prph_dump_addr[i].start + 4;
+ int num_bytes_in_chunk =
+ iwl_prph_dump_addr_comm[i].end -
+ iwl_prph_dump_addr_comm[i].start + 4;
prph_len += sizeof(*dump_data) +
sizeof(struct iwl_fw_error_dump_prph) +
num_bytes_in_chunk;
}
+ if (mvm->cfg->mq_rx_supported) {
+ for (i = 0; i <
+ ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
+ /* The range includes both boundaries */
+ int num_bytes_in_chunk =
+ iwl_prph_dump_addr_9000[i].end -
+ iwl_prph_dump_addr_9000[i].start + 4;
+
+ prph_len += sizeof(*dump_data) +
+ sizeof(struct iwl_fw_error_dump_prph) +
+ num_bytes_in_chunk;
+ }
+ }
+
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000)
radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
}
file_len = sizeof(*dump_file) +
sizeof(*dump_data) * 2 +
- sram_len + sizeof(*dump_mem) +
fifo_data_len +
prph_len +
radio_len +
@@ -525,6 +629,13 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (sram2_len)
file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
+ /* Make room for MEM segments */
+ for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
+ if (fw_dbg_mem[i])
+ file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+ le32_to_cpu(fw_dbg_mem[i]->len);
+ }
+
/* Make room for fw's virtual image pages, if it exists */
if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
mvm->fw_paging_db[0].fw_paging_block)
@@ -551,6 +662,9 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
mvm->fw_dump_desc->len;
+ if (!mvm->fw->dbg_dynamic_mem)
+ file_len += sram_len + sizeof(*dump_mem);
+
dump_file = vzalloc(file_len);
if (!dump_file) {
kfree(fw_error_dump);
@@ -600,16 +714,36 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
if (monitor_dump_only)
goto dump_trans_data;
- dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
- dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
- dump_mem = (void *)dump_data->data;
- dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
- dump_mem->offset = cpu_to_le32(sram_ofs);
- iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
- sram_len);
+ if (!mvm->fw->dbg_dynamic_mem) {
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
+ dump_mem = (void *)dump_data->data;
+ dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_SRAM);
+ dump_mem->offset = cpu_to_le32(sram_ofs);
+ iwl_trans_read_mem_bytes(mvm->trans, sram_ofs, dump_mem->data,
+ sram_len);
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mvm->fw->dbg_mem_tlv); i++) {
+ if (fw_dbg_mem[i]) {
+ u32 len = le32_to_cpu(fw_dbg_mem[i]->len);
+ u32 ofs = le32_to_cpu(fw_dbg_mem[i]->ofs);
+
+ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
+ dump_data->len = cpu_to_le32(len +
+ sizeof(*dump_mem));
+ dump_mem = (void *)dump_data->data;
+ dump_mem->type = fw_dbg_mem[i]->data_type;
+ dump_mem->offset = cpu_to_le32(ofs);
+ iwl_trans_read_mem_bytes(mvm->trans, ofs,
+ dump_mem->data,
+ len);
+ dump_data = iwl_fw_error_next_data(dump_data);
+ }
+ }
if (smem_len) {
- dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -617,10 +751,10 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->offset = cpu_to_le32(mvm->cfg->smem_offset);
iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->smem_offset,
dump_mem->data, smem_len);
+ dump_data = iwl_fw_error_next_data(dump_data);
}
if (sram2_len) {
- dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
dump_mem = (void *)dump_data->data;
@@ -628,11 +762,11 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->offset = cpu_to_le32(mvm->cfg->dccm2_offset);
iwl_trans_read_mem_bytes(mvm->trans, mvm->cfg->dccm2_offset,
dump_mem->data, sram2_len);
+ dump_data = iwl_fw_error_next_data(dump_data);
}
if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_8000 &&
CSR_HW_REV_STEP(mvm->trans->hw_rev) == SILICON_B_STEP) {
- dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
dump_data->len = cpu_to_le32(IWL8260_ICCM_LEN +
sizeof(*dump_mem));
@@ -641,6 +775,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
dump_mem->offset = cpu_to_le32(IWL8260_ICCM_OFFSET);
iwl_trans_read_mem_bytes(mvm->trans, IWL8260_ICCM_OFFSET,
dump_mem->data, IWL8260_ICCM_LEN);
+ dump_data = iwl_fw_error_next_data(dump_data);
}
/* Dump fw's virtual image */
@@ -651,7 +786,6 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
struct page *pages =
mvm->fw_paging_db[i].fw_paging_block;
- dump_data = iwl_fw_error_next_data(dump_data);
dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
dump_data->len = cpu_to_le32(sizeof(*paging) +
PAGING_BLOCK_SIZE);
@@ -659,12 +793,20 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
paging->index = cpu_to_le32(i);
memcpy(paging->data, page_address(pages),
PAGING_BLOCK_SIZE);
+ dump_data = iwl_fw_error_next_data(dump_data);
}
}
- dump_data = iwl_fw_error_next_data(dump_data);
- if (prph_len)
- iwl_dump_prph(mvm->trans, &dump_data);
+ if (prph_len) {
+ iwl_dump_prph(mvm->trans, &dump_data,
+ iwl_prph_dump_addr_comm,
+ ARRAY_SIZE(iwl_prph_dump_addr_comm));
+
+ if (mvm->cfg->mq_rx_supported)
+ iwl_dump_prph(mvm->trans, &dump_data,
+ iwl_prph_dump_addr_9000,
+ ARRAY_SIZE(iwl_prph_dump_addr_9000));
+ }
dump_trans_data:
fw_error_dump->trans_ptr = iwl_trans_dump_data(mvm->trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 09d895faf..7057f35cb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -64,6 +64,7 @@
*
*****************************************************************************/
#include <net/mac80211.h>
+#include <linux/netdevice.h>
#include "iwl-trans.h"
#include "iwl-op-mode.h"
@@ -114,14 +115,18 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
struct iwl_rss_config_cmd cmd = {
.flags = cpu_to_le32(IWL_RSS_ENABLE),
.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+ IWL_RSS_HASH_TYPE_IPV4_UDP |
IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
IWL_RSS_HASH_TYPE_IPV6_TCP |
+ IWL_RSS_HASH_TYPE_IPV6_UDP |
IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
};
+ /* Do not direct RSS traffic to Q 0 which is our fallback queue */
for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
- cmd.indirection_table[i] = i % mvm->trans->num_rx_queues;
- memcpy(cmd.secret_key, mvm->secret_key, sizeof(cmd.secret_key));
+ cmd.indirection_table[i] =
+ 1 + (i % (mvm->trans->num_rx_queues - 1));
+ netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));
return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
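
The 1 + (i % (num_rx_queues - 1)) expression is worth a worked example; isolated as a standalone sketch (the helper is illustrative, not from the patch):

static void example_fill_indirection(u8 *table, int n, int num_rx_queues)
{
	int i;

	/* skip queue 0 (the fallback queue): with num_rx_queues == 4
	 * the entries become 1, 2, 3, 1, 2, 3, ...
	 */
	for (i = 0; i < n; i++)
		table[i] = 1 + (i % (num_rx_queues - 1));
}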
@@ -176,8 +181,12 @@ static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
}
}
- if (sec_idx >= IWL_UCODE_SECTION_MAX) {
- IWL_ERR(mvm, "driver didn't find paging image\n");
+ /*
+ * If paging is enabled there should be at least 2 more sections left
+ * (one for CSS and one for Paging data)
+ */
+ if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
+ IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
iwl_free_fw_paging(mvm);
return -EINVAL;
}
@@ -412,7 +421,9 @@ static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
goto exit;
}
- mvm->trans->paging_download_buf = kzalloc(MAX_PAGING_IMAGE_SIZE,
+ /* Add an extra page for headers */
+ mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
+ FW_PAGING_SIZE,
GFP_KERNEL);
if (!mvm->trans->paging_download_buf) {
ret = -ENOMEM;
@@ -524,7 +535,7 @@ static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
return true;
}
- WARN_ON(iwl_phy_db_set_section(phy_db, pkt, GFP_ATOMIC));
+ WARN_ON(iwl_phy_db_set_section(phy_db, pkt));
return false;
}
@@ -643,7 +654,10 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
*/
memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
- mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
+ if (iwl_mvm_is_dqa_supported(mvm))
+ mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
+ else
+ mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;
for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
atomic_set(&mvm->mac80211_queue_stop_count[i], 0);
@@ -790,17 +804,22 @@ out:
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
struct iwl_host_cmd cmd = {
- .id = SHARED_MEM_CFG,
.flags = CMD_WANT_SKB,
.data = { NULL, },
.len = { 0, },
};
- struct iwl_rx_packet *pkt;
struct iwl_shared_mem_cfg *mem_cfg;
+ struct iwl_rx_packet *pkt;
u32 i;
lockdep_assert_held(&mvm->mutex);
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
+ cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
+ else
+ cmd.id = SHARED_MEM_CFG;
+
if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
return;
@@ -826,6 +845,25 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
le32_to_cpu(mem_cfg->page_buff_addr);
mvm->shared_mem_cfg.page_buff_size =
le32_to_cpu(mem_cfg->page_buff_size);
+
+ /* new API has more data */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
+ mvm->shared_mem_cfg.rxfifo_addr =
+ le32_to_cpu(mem_cfg->rxfifo_addr);
+ mvm->shared_mem_cfg.internal_txfifo_addr =
+ le32_to_cpu(mem_cfg->internal_txfifo_addr);
+
+ BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
+ sizeof(mem_cfg->internal_txfifo_size));
+
+ for (i = 0;
+ i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
+ i++)
+ mvm->shared_mem_cfg.internal_txfifo_size[i] =
+ le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
+ }
+
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
iwl_free_resp(&cmd);
@@ -944,7 +982,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
/* Add all the PHY contexts */
- chan = &mvm->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels[0];
+ chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
for (i = 0; i < NUM_PHY_CTX; i++) {
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index e885db346..7aae068c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -252,10 +252,14 @@ unsigned long iwl_mvm_get_used_hw_queues(struct iwl_mvm *mvm,
.exclude_vif = exclude_vif,
.used_hw_queues =
BIT(IWL_MVM_OFFCHANNEL_QUEUE) |
- BIT(mvm->aux_queue) |
- BIT(IWL_MVM_CMD_QUEUE),
+ BIT(mvm->aux_queue),
};
+ if (iwl_mvm_is_dqa_supported(mvm))
+ data.used_hw_queues |= BIT(IWL_MVM_DQA_CMD_QUEUE);
+ else
+ data.used_hw_queues |= BIT(IWL_MVM_CMD_QUEUE);
+
lockdep_assert_held(&mvm->mutex);
/* mark all VIF used hw queues */
@@ -425,12 +429,17 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
return 0;
}
- /* Find available queues, and allocate them to the ACs */
+ /*
+ * Find available queues, and allocate them to the ACs. When in
+ * DQA-mode they aren't really used, and this is done only so the
+ * mac80211 ieee80211_check_queues() function won't fail
+ */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
u8 queue = find_first_zero_bit(&used_hw_queues,
mvm->first_agg_queue);
- if (queue >= mvm->first_agg_queue) {
+ if (!iwl_mvm_is_dqa_supported(mvm) &&
+ queue >= mvm->first_agg_queue) {
IWL_ERR(mvm, "Failed to allocate queue\n");
ret = -EIO;
goto exit_fail;
@@ -442,13 +451,19 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
/* Allocate the CAB queue for softAP and GO interfaces */
if (vif->type == NL80211_IFTYPE_AP) {
- u8 queue = find_first_zero_bit(&used_hw_queues,
- mvm->first_agg_queue);
+ u8 queue;
- if (queue >= mvm->first_agg_queue) {
- IWL_ERR(mvm, "Failed to allocate cab queue\n");
- ret = -EIO;
- goto exit_fail;
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ queue = find_first_zero_bit(&used_hw_queues,
+ mvm->first_agg_queue);
+
+ if (queue >= mvm->first_agg_queue) {
+ IWL_ERR(mvm, "Failed to allocate cab queue\n");
+ ret = -EIO;
+ goto exit_fail;
+ }
+ } else {
+ queue = IWL_MVM_DQA_GCAST_QUEUE;
}
vif->cab_queue = queue;
@@ -486,15 +501,21 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
- iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_TX_FIFO_VO, 0, wdg_timeout);
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_enable_ac_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_TX_FIFO_VO, 0,
+ wdg_timeout);
break;
case NL80211_IFTYPE_AP:
iwl_mvm_enable_ac_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
/* fall through */
default:
+ /* If DQA is supported - queues will be enabled when needed */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ break;
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_enable_ac_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac],
@@ -514,15 +535,31 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
switch (vif->type) {
case NL80211_IFTYPE_P2P_DEVICE:
- iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
- IWL_MVM_OFFCHANNEL_QUEUE, IWL_MAX_TID_COUNT,
- 0);
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_disable_txq(mvm, IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MVM_OFFCHANNEL_QUEUE,
+ IWL_MAX_TID_COUNT, 0);
+
break;
case NL80211_IFTYPE_AP:
iwl_mvm_disable_txq(mvm, vif->cab_queue, vif->cab_queue,
IWL_MAX_TID_COUNT, 0);
+
+ if (iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_disable_txq(mvm,
+ IWL_MVM_DQA_AP_PROBE_RESP_QUEUE,
+ vif->hw_queue[0], IWL_MAX_TID_COUNT,
+ 0);
/* fall through */
default:
+ /*
+ * If DQA is supported - queues were already disabled, since in
+ * DQA-mode the queues are a property of the STA and not of the
+ * vif, and at this point the STA was already deleted
+ */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ break;
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
iwl_mvm_disable_txq(mvm, vif->hw_queue[ac],
vif->hw_queue[ac],
@@ -532,7 +569,7 @@ void iwl_mvm_mac_ctxt_release(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
static void iwl_mvm_ack_rates(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- enum ieee80211_band band,
+ enum nl80211_band band,
u8 *cck_rates, u8 *ofdm_rates)
{
struct ieee80211_supported_band *sband;
@@ -703,7 +740,7 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
rcu_read_lock();
chanctx = rcu_dereference(vif->chanctx_conf);
iwl_mvm_ack_rates(mvm, vif, chanctx ? chanctx->def.chan->band
- : IEEE80211_BAND_2GHZ,
+ : NL80211_BAND_2GHZ,
&cck_ack_rates, &ofdm_ack_rates);
rcu_read_unlock();
@@ -1038,7 +1075,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm,
cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
RATE_MCS_ANT_POS);
- if (info->band == IEEE80211_BAND_5GHZ || vif->p2p) {
+ if (info->band == NL80211_BAND_5GHZ || vif->p2p) {
rate = IWL_FIRST_OFDM_RATE;
} else {
rate = IWL_FIRST_CCK_RATE;
@@ -1489,7 +1526,7 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
rx_status.device_timestamp = le32_to_cpu(sb->system_time);
rx_status.band =
(sb->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
rx_status.freq =
ieee80211_channel_to_frequency(le16_to_cpu(sb->channel),
rx_status.band);
@@ -1499,5 +1536,5 @@ void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
/* pass it as regular rx to mac80211 */
- ieee80211_rx_napi(mvm->hw, skb, NULL);
+ ieee80211_rx_napi(mvm->hw, NULL, skb, NULL);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index a50f4df7e..18a8474b5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -229,7 +229,11 @@ void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
spin_lock_bh(&mvm->refs_lock);
- WARN_ON(!mvm->refs[ref_type]--);
+ if (WARN_ON(!mvm->refs[ref_type])) {
+ spin_unlock_bh(&mvm->refs_lock);
+ return;
+ }
+ mvm->refs[ref_type]--;
spin_unlock_bh(&mvm->refs_lock);
iwl_trans_unref(mvm->trans);
}
@@ -439,11 +443,19 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
+ if (iwl_mvm_has_new_rx_api(mvm))
+ ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+
+ if (mvm->trans->num_rx_queues > 1)
+ ieee80211_hw_set(hw, USES_RSS);
if (mvm->trans->max_skb_frags)
hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;
- hw->queues = mvm->first_agg_queue;
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ hw->queues = mvm->first_agg_queue;
+ else
+ hw->queues = IEEE80211_MAX_QUEUES;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
IEEE80211_RADIOTAP_MCS_HAVE_STBC;
@@ -550,18 +562,18 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
- if (mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels)
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
- if (mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels) {
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
+ if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
+ hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
fw_has_api(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_API_LQ_SS_PARAMS))
- hw->wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap.cap |=
+ hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
}
@@ -665,12 +677,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
hw->netdev_features |= mvm->cfg->features;
- if (!iwl_mvm_is_csum_supported(mvm))
- hw->netdev_features &= ~NETIF_F_RXCSUM;
-
- if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
- hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6;
+ if (!iwl_mvm_is_csum_supported(mvm)) {
+ hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
+ NETIF_F_RXCSUM);
+ /* We may support SW TX CSUM */
+ if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
+ hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
+ }
ret = ieee80211_register_hw(mvm->hw);
if (ret)
@@ -847,6 +860,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
u16 *ssn = &params->ssn;
u8 buf_size = params->buf_size;
bool amsdu = params->amsdu;
+ u16 timeout = params->timeout;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);
@@ -887,10 +901,12 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = -EINVAL;
break;
}
- ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size);
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
+ timeout);
break;
case IEEE80211_AMPDU_RX_STOP:
- ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size);
+ ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
+ timeout);
break;
case IEEE80211_AMPDU_TX_START:
if (!iwl_enable_tx_ampdu(mvm->cfg)) {
@@ -992,6 +1008,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
memset(mvm->sta_drained, 0, sizeof(mvm->sta_drained));
+ memset(mvm->sta_deferred_frames, 0, sizeof(mvm->sta_deferred_frames));
memset(mvm->tfd_drained, 0, sizeof(mvm->tfd_drained));
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
memset(&mvm->last_bt_notif_old, 0, sizeof(mvm->last_bt_notif_old));
@@ -1180,6 +1197,7 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk);
+ flush_work(&mvm->add_stream_wk);
cancel_delayed_work_sync(&mvm->fw_dump_wk);
iwl_mvm_free_fw_dump_desc(mvm);
@@ -1823,6 +1841,11 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
+ if (changes & BSS_CHANGED_ASSOC && !bss_conf->assoc &&
+ mvmvif->lqm_active)
+ iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_STOP_MEASUREMENT,
+ 0, 0);
+
/*
* If we're not associated yet, take the (new) BSSID before associating
* so the firmware knows. If we're already associated, then use the old
@@ -2342,7 +2365,8 @@ static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return;
}
- if (iwlwifi_mod_params.uapsd_disable) {
+ if (!vif->p2p &&
+ (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) {
vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD;
return;
}
@@ -2378,6 +2402,22 @@ iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm,
peer_addr, action);
}
+static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ struct iwl_mvm_tid_data *tid_data;
+ struct sk_buff *skb;
+ int i;
+
+ spin_lock_bh(&mvm_sta->lock);
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
+ tid_data = &mvm_sta->tid_data[i];
+ while ((skb = __skb_dequeue(&tid_data->deferred_tx_frames)))
+ ieee80211_free_txskb(mvm->hw, skb);
+ }
+ spin_unlock_bh(&mvm_sta->lock);
+}
+
static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2398,6 +2438,33 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
/* if a STA is being removed, reuse its ID */
flush_work(&mvm->sta_drained_wk);
+ /*
+ * If we are in a STA removal flow and in DQA mode:
+ *
+ * This is after the sync_rcu part, so the queues have already been
+ * flushed. No more TXs on their way in mac80211's path, and no more in
+ * the queues.
+ * Also, we won't be getting any new TX frames for this station.
+ * What we might have are deferred TX frames that need to be taken care
+ * of.
+ *
+	 * Drop any still-queued deferred frames before removing the STA, and
+ * make sure the worker is no longer handling frames for this STA.
+ */
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST &&
+ iwl_mvm_is_dqa_supported(mvm)) {
+ struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ iwl_mvm_purge_deferred_tx_frames(mvm, mvm_sta);
+ flush_work(&mvm->add_stream_wk);
+
+ /*
+ * No need to make sure deferred TX indication is off since the
+ * worker will already remove it if it was on
+ */
+ }
+
mutex_lock(&mvm->mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
@@ -2861,7 +2928,7 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)),
.sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id),
/* Set the channel info data */
- .channel_info.band = (channel->band == IEEE80211_BAND_2GHZ) ?
+ .channel_info.band = (channel->band == NL80211_BAND_2GHZ) ?
PHY_BAND_24 : PHY_BAND_5,
.channel_info.channel = channel->hw_value,
.channel_info.width = PHY_VHT_CHANNEL_MODE20,
@@ -3630,6 +3697,11 @@ static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_STATION:
+ if (mvmvif->lqm_active)
+ iwl_mvm_send_lqm_cmd(vif,
+ LQM_CMD_OPERATION_STOP_MEASUREMENT,
+ 0, 0);
+
/* Schedule the time event to a bit before beacon 1,
* to make sure we're in the new channel when the
* GO/AP arrives.
@@ -3729,6 +3801,10 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
+ /* Make sure we're done with the deferred traffic before flushing */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ flush_work(&mvm->add_stream_wk);
+
mutex_lock(&mvm->mutex);
mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -3775,8 +3851,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
if (idx != 0)
return -ENOENT;
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
mutex_lock(&mvm->mutex);
@@ -3822,8 +3898,8 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
+ if (!fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return;
/* if beacon filtering isn't on mac80211 does it anyway */
@@ -3976,6 +4052,55 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
}
}
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ struct iwl_mvm_internal_rxq_notif *notif,
+ u32 size)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq);
+ u32 qmask = BIT(mvm->trans->num_rx_queues) - 1;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return;
+
+ notif->cookie = mvm->queue_sync_cookie;
+
+ if (notif->sync)
+ atomic_set(&mvm->queue_sync_counter,
+ mvm->trans->num_rx_queues);
+
+ ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
+ goto out;
+ }
+
+ if (notif->sync)
+ ret = wait_event_timeout(notif_waitq,
+ atomic_read(&mvm->queue_sync_counter) == 0,
+ HZ);
+ WARN_ON_ONCE(!ret);
+
+out:
+ atomic_set(&mvm->queue_sync_counter, 0);
+ mvm->queue_sync_cookie++;
+}
+
+static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_internal_rxq_notif data = {
+ .type = IWL_MVM_RXQ_EMPTY,
+ .sync = 1,
+ };
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data));
+ mutex_unlock(&mvm->mutex);
+}
+
const struct ieee80211_ops iwl_mvm_hw_ops = {
.tx = iwl_mvm_mac_tx,
.ampdu_action = iwl_mvm_mac_ampdu_action,
@@ -4032,6 +4157,8 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.event_callback = iwl_mvm_mac_event_callback,
+ .sync_rx_queues = iwl_mvm_sync_rx_queues,
+
CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd)
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 9abbc93e3..ffbd41dcc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -208,7 +208,7 @@ enum iwl_power_scheme {
};
#define IWL_CONN_MAX_LISTEN_INTERVAL 10
-#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
+#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL
#ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask {
@@ -301,6 +301,8 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_PROTECT_CSA,
IWL_MVM_REF_FW_DBG_COLLECT,
IWL_MVM_REF_INIT_UCODE,
+ IWL_MVM_REF_SENDING_CMD,
+ IWL_MVM_REF_RX,
/* update debugfs.c when changing this */
@@ -453,6 +455,12 @@ struct iwl_mvm_vif {
/* TCP Checksum Offload */
netdev_features_t features;
+
+ /*
+ * link quality measurement - used to check whether this interface
+ * is in the middle of a link quality measurement
+ */
+ bool lqm_active;
};
static inline struct iwl_mvm_vif *
@@ -602,6 +610,87 @@ struct iwl_mvm_shared_mem_cfg {
u32 rxfifo_size[RX_FIFO_MAX_NUM];
u32 page_buff_addr;
u32 page_buff_size;
+ u32 rxfifo_addr;
+ u32 internal_txfifo_addr;
+ u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
+};
+
+/**
+ * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
+ * @head_sn: reorder window head sn
+ * @num_stored: number of mpdus stored in the buffer
+ * @buf_size: the reorder buffer size as set by the last addba request
+ * @sta_id: sta id of this reorder buffer
+ * @queue: queue of this reorder buffer
+ * @last_amsdu: track last A-MSDU SN for duplication detection
+ * @last_sub_index: track A-MSDU sub-frame index for duplication detection
+ * @entries: list of skbs stored
+ * @reorder_time: time the packet was stored in the reorder buffer
+ * @reorder_timer: timer for frames in the reorder buffer. For A-MSDU,
+ * it is the time of the last received sub-frame
+ * @removed: prevent timer re-arming
+ * @lock: protect reorder buffer internal state
+ * @mvm: mvm pointer, needed for frame timer context
+ */
+struct iwl_mvm_reorder_buffer {
+ u16 head_sn;
+ u16 num_stored;
+ u8 buf_size;
+ u8 sta_id;
+ int queue;
+ u16 last_amsdu;
+ u8 last_sub_index;
+ struct sk_buff_head entries[IEEE80211_MAX_AMPDU_BUF];
+ unsigned long reorder_time[IEEE80211_MAX_AMPDU_BUF];
+ struct timer_list reorder_timer;
+ bool removed;
+ spinlock_t lock;
+ struct iwl_mvm *mvm;
+} ____cacheline_aligned_in_smp;
+
+/**
+ * struct iwl_mvm_baid_data - BA session data
+ * @sta_id: station id
+ * @tid: tid of the session
+ * @baid: baid of the session
+ * @timeout: the timeout set in the addba request
+ * @last_rx: last rx jiffies, updated only if timeout passed from last update
+ * @session_timer: timer to check if BA session expired, runs at 2 * timeout
+ * @mvm: mvm pointer, needed for timer context
+ * @reorder_buf: reorder buffer, allocated per queue
+ */
+struct iwl_mvm_baid_data {
+ struct rcu_head rcu_head;
+ u8 sta_id;
+ u8 tid;
+ u8 baid;
+ u16 timeout;
+ unsigned long last_rx;
+ struct timer_list session_timer;
+ struct iwl_mvm *mvm;
+ struct iwl_mvm_reorder_buffer reorder_buf[];
+};
+
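Since reorder_buf[] is a flexible array member sized per RX queue, the allocation presumably looks something like this sketch (the allocation site is not shown in this hunk; the function name is hypothetical):

static struct iwl_mvm_baid_data *
example_alloc_baid_data(u32 nr_queues)
{
	/* one reorder buffer per RX queue hangs off the flexible array */
	return kzalloc(sizeof(struct iwl_mvm_baid_data) +
		       nr_queues * sizeof(struct iwl_mvm_reorder_buffer),
		       GFP_KERNEL);
}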
+/*
+ * enum iwl_mvm_queue_status - queue status
+ * @IWL_MVM_QUEUE_FREE: the queue is not allocated nor reserved
+ * Basically, this means that this queue can be used for any purpose
+ * @IWL_MVM_QUEUE_RESERVED: queue is reserved but not yet in use
+ * This is the state of a queue that has been dedicated to some RA/TID
+ * (agg'd or not), but that hasn't yet gone through the actual enablement
+ * of iwl_mvm_enable_txq(), and therefore no traffic can go through it yet.
+ * Note that in this state there is no requirement to already know what TID
+ * should be used with this queue; it is just marked as a queue that will
+ * be used, and shouldn't be allocated to anyone else.
+ * @IWL_MVM_QUEUE_READY: queue is ready to be used
+ * This is the state of a queue that has been fully configured (including
+ * SCD pointers, etc), has a specific RA/TID assigned to it, and can be
+ * used to send traffic.
+ */
+enum iwl_mvm_queue_status {
+ IWL_MVM_QUEUE_FREE,
+ IWL_MVM_QUEUE_RESERVED,
+ IWL_MVM_QUEUE_READY,
};
struct iwl_mvm {
@@ -624,6 +713,8 @@ struct iwl_mvm {
unsigned long status;
+ u32 queue_sync_cookie;
+ atomic_t queue_sync_counter;
/*
* for beacon filtering -
* currently only one interface can be supported
@@ -656,10 +747,12 @@ struct iwl_mvm {
/* Map to HW queue */
u32 hw_queue_to_mac80211;
u8 hw_queue_refcount;
- bool setup_reserved;
+ u8 ra_sta_id; /* The RA this queue is mapped to, if exists */
u16 tid_bitmap; /* Bitmap of the TIDs mapped to this queue */
+ enum iwl_mvm_queue_status status;
} queue_info[IWL_MAX_HW_QUEUES];
spinlock_t queue_info_lock; /* For syncing queue mgmt operations */
+ struct work_struct add_stream_wk; /* To add streams to queues */
atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES];
const char *nvm_file_name;
@@ -679,11 +772,11 @@ struct iwl_mvm {
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
struct work_struct sta_drained_wk;
+ unsigned long sta_deferred_frames[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
atomic_t pending_frames[IWL_MVM_STATION_COUNT];
u32 tfd_drained[IWL_MVM_STATION_COUNT];
u8 rx_ba_sessions;
- u32 secret_key[IWL_RSS_HASH_KEY_CNT];
/* configured by mac80211 */
u32 rts_threshold;
@@ -694,6 +787,7 @@ struct iwl_mvm {
struct iwl_mcast_filter_cmd *mcast_filter_cmd;
enum iwl_mvm_scan_type scan_type;
enum iwl_mvm_sched_scan_pass_all_states sched_scan_pass_all;
+ struct timer_list scan_timer;
/* max number of simultaneous scans the FW supports */
unsigned int max_scans;
@@ -903,6 +997,10 @@ struct iwl_mvm {
u32 ciphers[6];
struct iwl_mvm_tof_data tof_data;
+ struct ieee80211_vif *nan_vif;
+#define IWL_MAX_BAID 32
+ struct iwl_mvm_baid_data __rcu *baid_map[IWL_MAX_BAID];
+
/*
* Drop beacons from other APs in AP mode when there are no connected
* clients.
@@ -1048,7 +1146,8 @@ static inline bool iwl_mvm_bt_is_rrc_supported(struct iwl_mvm *mvm)
static inline bool iwl_mvm_is_csum_supported(struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_CSUM_SUPPORT);
+ IWL_UCODE_TLV_CAPA_CSUM_SUPPORT) &&
+ !IWL_MVM_HW_CSUM_DISABLE;
}
static inline bool iwl_mvm_is_mplut_supported(struct iwl_mvm *mvm)
@@ -1063,7 +1162,8 @@ bool iwl_mvm_is_p2p_standalone_uapsd_supported(struct iwl_mvm *mvm)
{
return fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD) &&
- IWL_MVM_P2P_UAPSD_STANDALONE;
+ !(iwlwifi_mod_params.uapsd_disable &
+ IWL_DISABLE_UAPSD_P2P_CLIENT);
}
static inline bool iwl_mvm_has_new_rx_api(struct iwl_mvm *mvm)
@@ -1115,9 +1215,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm);
/* Utils */
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
- enum ieee80211_band band);
+ enum nl80211_band band);
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
@@ -1224,7 +1324,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
void iwl_mvm_rx_phy_cmd_mq(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
-void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue);
int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
const u8 *data, u32 count);
@@ -1297,6 +1397,7 @@ int iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
+void iwl_mvm_scan_timeout(unsigned long data);
/* Scheduled scan */
void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -1449,26 +1550,10 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
bool iwl_mvm_bt_coex_is_ant_avail(struct iwl_mvm *mvm, u8 ant);
bool iwl_mvm_bt_coex_is_shared_ant_avail(struct iwl_mvm *mvm);
bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
- enum ieee80211_band band);
+ enum nl80211_band band);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
-bool iwl_mvm_bt_coex_is_shared_ant_avail_old(struct iwl_mvm *mvm);
-void iwl_mvm_bt_coex_vif_change_old(struct iwl_mvm *mvm);
-int iwl_send_bt_init_conf_old(struct iwl_mvm *mvm);
-void iwl_mvm_rx_bt_coex_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_bt_rssi_event_old(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- enum ieee80211_rssi_event_data);
-u16 iwl_mvm_coex_agg_time_limit_old(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_mimo_allowed_old(struct iwl_mvm *mvm,
- struct ieee80211_sta *sta);
-bool iwl_mvm_bt_coex_is_tpc_allowed_old(struct iwl_mvm *mvm,
- enum ieee80211_band band);
-void iwl_mvm_rx_ant_coupling_notif_old(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
-
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
void
@@ -1563,6 +1648,10 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
void iwl_mvm_stop_mac_queues(struct iwl_mvm *mvm, unsigned long mq);
+/* Re-configure the SCD for a queue that has already been configured */
+int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
+ int tid, int frame_limit, u16 ssn);
+
/* Thermal management and CT-kill */
void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_temp_changed(struct iwl_mvm *mvm, u32 temp);
@@ -1625,6 +1714,10 @@ void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
void iwl_mvm_rx_tdls_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_tdls_ch_switch_work(struct work_struct *work);
+void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
+ struct iwl_mvm_internal_rxq_notif *notif,
+ u32 size);
+void iwl_mvm_reorder_timer_expired(unsigned long data);
struct ieee80211_vif *iwl_mvm_get_bss_vif(struct iwl_mvm *mvm);
void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error);
@@ -1634,4 +1727,10 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
const char *errmsg);
+/* Link Quality Measurement */
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+ enum iwl_lqm_cmd_operatrions operation,
+ u32 duration, u32 timeout);
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm);
+
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index d27839909..a68054f12 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -292,7 +292,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(DTS_MEASUREMENT_NOTIFICATION, iwl_mvm_temp_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_GRP(PHY_OPS_GROUP, DTS_MEASUREMENT_NOTIF_WIDE,
- iwl_mvm_temp_notif, RX_HANDLER_ASYNC_LOCKED),
+ iwl_mvm_temp_notif, RX_HANDLER_ASYNC_UNLOCKED),
RX_HANDLER_GRP(PHY_OPS_GROUP, CT_KILL_NOTIFICATION,
iwl_mvm_ct_kill_notif, RX_HANDLER_SYNC),
@@ -421,6 +421,21 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
+ HCMD_NAME(SHARED_MEM_CFG_CMD),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
+static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+ HCMD_NAME(LINK_QUALITY_MEASUREMENT_CMD),
+ HCMD_NAME(LINK_QUALITY_MEASUREMENT_COMPLETE_NOTIF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
HCMD_NAME(CTDP_CONFIG_CMD),
@@ -449,6 +464,8 @@ static const struct iwl_hcmd_names iwl_mvm_prot_offload_names[] = {
static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[LEGACY_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
[LONG_GROUP] = HCMD_ARR(iwl_mvm_legacy_names),
+ [SYSTEM_GROUP] = HCMD_ARR(iwl_mvm_system_names),
+ [MAC_CONF_GROUP] = HCMD_ARR(iwl_mvm_mac_conf_names),
[PHY_OPS_GROUP] = HCMD_ARR(iwl_mvm_phy_names),
[DATA_PATH_GROUP] = HCMD_ARR(iwl_mvm_data_path_names),
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
@@ -537,8 +554,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
mvm->aux_queue = 15;
- mvm->first_agg_queue = 16;
- mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ mvm->first_agg_queue = 16;
+ mvm->last_agg_queue = mvm->cfg->base_params->num_of_queues - 1;
+ } else {
+ mvm->first_agg_queue = IWL_MVM_DQA_MIN_DATA_QUEUE;
+ mvm->last_agg_queue = IWL_MVM_DQA_MAX_DATA_QUEUE;
+ }
if (mvm->cfg->base_params->num_of_queues == 16) {
mvm->aux_queue = 11;
mvm->first_agg_queue = 12;
@@ -562,12 +584,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
INIT_DELAYED_WORK(&mvm->fw_dump_wk, iwl_mvm_fw_error_dump_wk);
INIT_DELAYED_WORK(&mvm->tdls_cs.dwork, iwl_mvm_tdls_ch_switch_work);
+ INIT_WORK(&mvm->add_stream_wk, iwl_mvm_add_new_dqa_stream_wk);
spin_lock_init(&mvm->d0i3_tx_lock);
spin_lock_init(&mvm->refs_lock);
skb_queue_head_init(&mvm->d0i3_tx);
init_waitqueue_head(&mvm->d0i3_exit_waitq);
+ atomic_set(&mvm->queue_sync_counter, 0);
+
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
/*
@@ -601,7 +626,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
- trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
+ if (iwl_mvm_is_dqa_supported(mvm))
+ trans_cfg.cmd_queue = IWL_MVM_DQA_CMD_QUEUE;
+ else
+ trans_cfg.cmd_queue = IWL_MVM_CMD_QUEUE;
trans_cfg.cmd_fifo = IWL_MVM_TX_FIFO_CMD;
trans_cfg.scd_set_active = true;
@@ -707,8 +735,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_mvm_tof_init(mvm);
- /* init RSS hash key */
- get_random_bytes(mvm->secret_key, sizeof(mvm->secret_key));
+ setup_timer(&mvm->scan_timer, iwl_mvm_scan_timeout,
+ (unsigned long)mvm);
return op_mode;
@@ -763,6 +791,11 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
iwl_mvm_tof_clean(mvm);
+ del_timer_sync(&mvm->scan_timer);
+
+ mutex_destroy(&mvm->mutex);
+ mutex_destroy(&mvm->d0i3_suspend_mutex);
+
ieee80211_free_hw(mvm->hw);
}
@@ -904,7 +937,7 @@ static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
iwl_mvm_rx_rx_mpdu(mvm, napi, rxb);
else if (pkt->hdr.cmd == FRAME_RELEASE)
- iwl_mvm_rx_frame_release(mvm, rxb, 0);
+ iwl_mvm_rx_frame_release(mvm, napi, rxb, 0);
else if (pkt->hdr.cmd == REPLY_RX_PHY_CMD)
iwl_mvm_rx_rx_phy_cmd(mvm, rxb);
else
@@ -1182,7 +1215,6 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
struct iwl_d0i3_iter_data *iter_data)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvmsta;
u32 available_tids = 0;
u8 tid;
@@ -1191,11 +1223,10 @@ static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
return false;
- ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
- if (IS_ERR_OR_NULL(ap_sta))
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
+ if (!mvmsta)
return false;
- mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
spin_lock_bh(&mvmsta->lock);
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
@@ -1606,7 +1637,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (unlikely(pkt->hdr.cmd == FRAME_RELEASE))
- iwl_mvm_rx_frame_release(mvm, rxb, queue);
+ iwl_mvm_rx_frame_release(mvm, napi, rxb, queue);
else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
pkt->hdr.group_id == DATA_PATH_GROUP))
iwl_mvm_rx_queue_notif(mvm, rxb, queue);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
index 6e6a56f21..95138830b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/phy-ctxt.c
@@ -147,7 +147,7 @@ static void iwl_mvm_phy_ctxt_cmd_data(struct iwl_mvm *mvm,
u8 active_cnt, idle_cnt;
/* Set the channel info data */
- cmd->ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+ cmd->ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
PHY_BAND_24 : PHY_BAND_5);
cmd->ci.channel = chandef->chan->hw_value;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index f313910cd..7b1f6ad60 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -227,7 +227,7 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
}
- cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+ cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
if (mvm->cur_ucode == IWL_UCODE_WOWLAN || cmd->flags &
cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 61d0a8cd1..81dd2f6a4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -829,7 +829,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
/* Convert a ucode rate into an rs_rate object */
static int rs_rate_from_ucode_rate(const u32 ucode_rate,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct rs_rate *rate)
{
u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
@@ -848,7 +848,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
if (!(ucode_rate & RATE_MCS_HT_MSK) &&
!(ucode_rate & RATE_MCS_VHT_MSK)) {
if (num_of_ant == 1) {
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
else
rate->type = LQ_LEGACY_G;
@@ -1043,7 +1043,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
return;
} else if (is_siso(rate)) {
/* Downgrade to Legacy if we were in SISO */
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
else
rate->type = LQ_LEGACY_G;
@@ -1850,7 +1850,7 @@ static int rs_switch_to_column(struct iwl_mvm *mvm,
rate->ant = column->ant;
if (column->mode == RS_LEGACY) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ if (lq_sta->band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
else
rate->type = LQ_LEGACY_G;
@@ -2020,7 +2020,7 @@ static void rs_get_adjacent_txp(struct iwl_mvm *mvm, int index,
}
static bool rs_tpc_allowed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct rs_rate *rate, enum ieee80211_band band)
+ struct rs_rate *rate, enum nl80211_band band)
{
int index = rate->index;
bool cam = (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM);
@@ -2126,7 +2126,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_vif *vif = mvm_sta->vif;
struct ieee80211_chanctx_conf *chanctx_conf;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct iwl_rate_scale_data *window;
struct rs_rate *rate = &tbl->rate;
enum tpc_action action;
@@ -2148,7 +2148,7 @@ static bool rs_tpc_perform(struct iwl_mvm *mvm,
rcu_read_lock();
chanctx_conf = rcu_dereference(vif->chanctx_conf);
if (WARN_ON(!chanctx_conf))
- band = IEEE80211_NUM_BANDS;
+ band = NUM_NL80211_BANDS;
else
band = chanctx_conf->def.chan->band;
rcu_read_unlock();
@@ -2606,7 +2606,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm,
rate->type = lq_sta->is_vht ? LQ_VHT_MIMO2 : LQ_HT_MIMO2;
else if (lq_sta->max_siso_rate_idx != IWL_RATE_INVALID)
rate->type = lq_sta->is_vht ? LQ_VHT_SISO : LQ_HT_SISO;
- else if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ else if (lq_sta->band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
else
rate->type = LQ_LEGACY_G;
@@ -2623,7 +2623,7 @@ static void rs_init_optimal_rate(struct iwl_mvm *mvm,
} else {
lq_sta->optimal_rate_mask = lq_sta->active_legacy_rate;
- if (lq_sta->band == IEEE80211_BAND_5GHZ) {
+ if (lq_sta->band == NL80211_BAND_5GHZ) {
lq_sta->optimal_rates = rs_optimal_rates_5ghz_legacy;
lq_sta->optimal_nentries =
ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
@@ -2679,7 +2679,7 @@ static struct rs_rate *rs_get_optimal_rate(struct iwl_mvm *mvm,
static void rs_get_initial_rate(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct rs_rate *rate)
{
int i, nentries;
@@ -2714,7 +2714,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
rate->index = find_first_bit(&lq_sta->active_legacy_rate,
BITS_PER_LONG);
- if (band == IEEE80211_BAND_5GHZ) {
+ if (band == NL80211_BAND_5GHZ) {
rate->type = LQ_LEGACY_A;
initial_rates = rs_optimal_rates_5ghz_legacy;
nentries = ARRAY_SIZE(rs_optimal_rates_5ghz_legacy);
@@ -2814,7 +2814,7 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
static void rs_initialize_lq(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct iwl_lq_sta *lq_sta,
- enum ieee80211_band band,
+ enum nl80211_band band,
bool init)
{
struct iwl_scale_tbl_info *tbl;
@@ -3097,7 +3097,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
* Called after adding a new station to initialize rate scaling
*/
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum ieee80211_band band, bool init)
+ enum nl80211_band band, bool init)
{
int i, j;
struct ieee80211_hw *hw = mvm->hw;
@@ -3203,7 +3203,7 @@ static void rs_rate_update(void *mvm_r,
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
struct iwl_lq_cmd *lq_cmd,
- enum ieee80211_band band,
+ enum nl80211_band band,
u32 ucode_rate)
{
struct rs_rate rate;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index bdb6f2d8d..90d046fb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -305,7 +305,7 @@ struct iwl_lq_sta {
bool stbc_capable; /* Tx STBC is supported by chip and Rx by STA */
bool bfer_capable; /* Remote supports beamformee and we BFer */
- enum ieee80211_band band;
+ enum nl80211_band band;
/* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
unsigned long active_legacy_rate;
@@ -358,7 +358,7 @@ struct iwl_lq_sta {
/* Initialize station's rate scaling information after adding station */
void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- enum ieee80211_band band, bool init);
+ enum nl80211_band band, bool init);
/* Notify RS about Tx status */
void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 485cfc1a4..ab7f7eda9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -97,6 +97,7 @@ void iwl_mvm_rx_rx_phy_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* Adds the rxb to a new skb and give it to mac80211
*/
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
struct napi_struct *napi,
struct sk_buff *skb,
struct ieee80211_hdr *hdr, u16 len,
@@ -131,7 +132,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
fraglen, rxb->truesize);
}
- ieee80211_rx_napi(mvm->hw, skb, napi);
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
/*
@@ -271,6 +272,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
u32 rate_n_flags;
u32 rx_pkt_status;
u8 crypt_len = 0;
+ bool take_ref;
phy_info = &mvm->last_phy_info;
rx_res = (struct iwl_rx_mpdu_res_start *)pkt->data;
@@ -319,7 +321,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->device_timestamp = le32_to_cpu(phy_info->system_timestamp);
rx_status->band =
(phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_BAND_24)) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
rx_status->freq =
ieee80211_channel_to_frequency(le16_to_cpu(phy_info->channel),
rx_status->band);
@@ -453,8 +455,26 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
mvm->sched_scan_pass_all == SCHED_SCAN_PASS_ALL_ENABLED))
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_FOUND;
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, hdr, len, ampdu_status,
- crypt_len, rxb);
+ if (unlikely(ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)))
+ rx_status->boottime_ns = ktime_get_boot_ns();
+
+ /* Take a reference briefly to kick off a d0i3 entry delay so
+ * we can handle bursts of RX packets without toggling the
+ * state too often. But don't do this for beacons if we are
+ * going to idle because the beacon filtering changes we make
+ * cause the firmware to send us collateral beacons. */
+ take_ref = !(test_bit(STATUS_TRANS_GOING_IDLE, &mvm->trans->status) &&
+ ieee80211_is_beacon(hdr->frame_control));
+
+ if (take_ref)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_RX);
+
+ iwl_mvm_pass_packet_to_mac80211(mvm, sta, napi, skb, hdr, len,
+ ampdu_status, crypt_len, rxb);
+
+ if (take_ref)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_RX);
}
static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 9a54f2d2a..2c61516d0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -210,7 +210,7 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
if (iwl_mvm_check_pn(mvm, skb, queue, sta))
kfree_skb(skb);
else
- ieee80211_rx_napi(mvm->hw, skb, napi);
+ ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
@@ -294,10 +294,15 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
+ u16 flags = le16_to_cpu(desc->l3l4_flags);
+ u8 l3_prot = (u8)((flags & IWL_RX_L3L4_L3_PROTO_MASK) >>
+ IWL_RX_L3_PROTO_POS);
if (mvmvif->features & NETIF_F_RXCSUM &&
- desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_IP_HDR_CSUM_OK) &&
- desc->l3l4_flags & cpu_to_le16(IWL_RX_L3L4_TCP_UDP_CSUM_OK))
+ flags & IWL_RX_L3L4_TCP_UDP_CSUM_OK &&
+ (flags & IWL_RX_L3L4_IP_HDR_CSUM_OK ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6 ||
+ l3_prot == IWL_RX_L3_TYPE_IPV6_FRAG))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -390,6 +395,150 @@ int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask,
return ret;
}
+/*
+ * Returns true if sn2 - buffer_size < sn1 < sn2.
+ * To be used only in order to compare reorder buffer head with NSSN.
+ * We fully trust NSSN unless it is behind us due to reorder timeout.
+ * Reorder timeout can only bring us up to buffer_size SNs ahead of NSSN.
+ */
+static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size)
+{
+ return ieee80211_sn_less(sn1, sn2) &&
+ !ieee80211_sn_less(sn1, sn2 - buffer_size);
+}
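
For reference, a minimal standalone sketch of the same modulo-4096 window test, assuming mac80211's ieee80211_sn_less() semantics (((a - b) & 0xfff) > 2048); the names here are illustrative, not driver API:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define SN_MASK 0xfff /* 802.11 sequence numbers are 12 bits */

/* mirrors mac80211's ieee80211_sn_less(): a precedes b modulo 4096 */
static bool sn_less(uint16_t a, uint16_t b)
{
	return ((a - b) & SN_MASK) > 2048;
}

/* mirrors iwl_mvm_is_sn_less() above: sn2 - buf_size < sn1 < sn2 */
static bool in_release_window(uint16_t sn1, uint16_t sn2, uint16_t buf_size)
{
	return sn_less(sn1, sn2) && !sn_less(sn1, (sn2 - buf_size) & SN_MASK);
}

int main(void)
{
	/* head_sn = 4090, nssn = 5 (wrapped), 64-frame buffer */
	assert(in_release_window(4090, 5, 64));  /* behind nssn, in window */
	assert(!in_release_window(5, 4090, 64)); /* ahead of nssn */
	return 0;
}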
+
+#define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10)
+
+static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ struct napi_struct *napi,
+ struct iwl_mvm_reorder_buffer *reorder_buf,
+ u16 nssn)
+{
+ u16 ssn = reorder_buf->head_sn;
+
+ lockdep_assert_held(&reorder_buf->lock);
+
+ /* ignore nssn smaller than head sn - this can happen due to timeout */
+ if (iwl_mvm_is_sn_less(nssn, ssn, reorder_buf->buf_size))
+ return;
+
+ while (iwl_mvm_is_sn_less(ssn, nssn, reorder_buf->buf_size)) {
+ int index = ssn % reorder_buf->buf_size;
+ struct sk_buff_head *skb_list = &reorder_buf->entries[index];
+ struct sk_buff *skb;
+
+ ssn = ieee80211_sn_inc(ssn);
+
+ /* holes are valid since nssn indicates frames were received. */
+ if (skb_queue_empty(skb_list) || !skb_peek_tail(skb_list))
+ continue;
+ /* Empty the list. Will have more than one frame for A-MSDU */
+ while ((skb = __skb_dequeue(skb_list))) {
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
+ reorder_buf->queue,
+ sta);
+ reorder_buf->num_stored--;
+ }
+ }
+ reorder_buf->head_sn = nssn;
+
+ if (reorder_buf->num_stored && !reorder_buf->removed) {
+ u16 index = reorder_buf->head_sn % reorder_buf->buf_size;
+
+ while (!skb_peek_tail(&reorder_buf->entries[index]))
+ index = (index + 1) % reorder_buf->buf_size;
+ /* modify timer to match next frame's expiration time */
+ mod_timer(&reorder_buf->reorder_timer,
+ reorder_buf->reorder_time[index] + 1 +
+ RX_REORDER_BUF_TIMEOUT_MQ);
+ } else {
+ del_timer(&reorder_buf->reorder_timer);
+ }
+}
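
As a rough illustration of the release loop above - a sketch under the same window semantics, not driver code - walk the ring from head_sn toward nssn, flush every occupied slot, skip holes, then advance the head:

#include <stdint.h>
#include <stdio.h>

#define SN_MASK  0xfff
#define BUF_SIZE 64

static uint16_t head_sn;
static int stored[BUF_SIZE]; /* frames per slot; >1 only for A-MSDUs */

static uint16_t sn_dist(uint16_t from, uint16_t to)
{
	return (to - from) & SN_MASK;
}

static void release_frames(uint16_t nssn)
{
	uint16_t ssn = head_sn;

	/* same condition as iwl_mvm_is_sn_less(ssn, nssn, BUF_SIZE) */
	while (sn_dist(ssn, nssn) && sn_dist(ssn, nssn) <= BUF_SIZE) {
		int index = ssn % BUF_SIZE;

		ssn = (ssn + 1) & SN_MASK;
		/* a hole is fine: nssn proves the frame was received */
		while (stored[index]) {
			printf("deliver frame from slot %d\n", index);
			stored[index]--;
		}
	}
	head_sn = nssn; /* the driver also re-arms or stops the timer here */
}

int main(void)
{
	head_sn = 10;
	stored[11 % BUF_SIZE] = 2; /* an A-MSDU at SN 11; SN 10 is a hole */
	release_frames(13);        /* delivers both subframes of SN 11 */
	return 0;
}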
+
+void iwl_mvm_reorder_timer_expired(unsigned long data)
+{
+ struct iwl_mvm_reorder_buffer *buf = (void *)data;
+ int i;
+ u16 sn = 0, index = 0;
+ bool expired = false;
+
+ spin_lock_bh(&buf->lock);
+
+ if (!buf->num_stored || buf->removed) {
+ spin_unlock_bh(&buf->lock);
+ return;
+ }
+
+ for (i = 0; i < buf->buf_size ; i++) {
+ index = (buf->head_sn + i) % buf->buf_size;
+
+ if (!skb_peek_tail(&buf->entries[index]))
+ continue;
+ if (!time_after(jiffies, buf->reorder_time[index] +
+ RX_REORDER_BUF_TIMEOUT_MQ))
+ break;
+ expired = true;
+ sn = ieee80211_sn_add(buf->head_sn, i + 1);
+ }
+
+ if (expired) {
+ struct ieee80211_sta *sta;
+
+ rcu_read_lock();
+ sta = rcu_dereference(buf->mvm->fw_id_to_mac_id[buf->sta_id]);
+ /* SN is set to the last expired frame + 1 */
+ iwl_mvm_release_frames(buf->mvm, sta, NULL, buf, sn);
+ rcu_read_unlock();
+ } else if (buf->num_stored) {
+ /*
+		 * If no frame expired and there are stored frames, index now
+		 * points to the first unexpired frame - re-arm the timer to
+		 * match that frame's expiration time.
+ */
+ mod_timer(&buf->reorder_timer,
+ buf->reorder_time[index] +
+ 1 + RX_REORDER_BUF_TIMEOUT_MQ);
+ }
+ spin_unlock_bh(&buf->lock);
+}
+
+static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
+ struct iwl_mvm_delba_data *data)
+{
+ struct iwl_mvm_baid_data *ba_data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ u8 baid = data->baid;
+
+ if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ /* release all frames that are in the reorder buffer to the stack */
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, NULL, reorder_buf,
+ ieee80211_sn_add(reorder_buf->head_sn,
+ reorder_buf->buf_size));
+ spin_unlock_bh(&reorder_buf->lock);
+ del_timer_sync(&reorder_buf->reorder_timer);
+
+out:
+ rcu_read_unlock();
+}
+
void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
int queue)
{
@@ -400,15 +549,184 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
notif = (void *)pkt->data;
internal_notif = (void *)notif->payload;
+ if (internal_notif->sync) {
+ if (mvm->queue_sync_cookie != internal_notif->cookie) {
+ WARN_ONCE(1,
+ "Received expired RX queue sync message\n");
+ return;
+ }
+ atomic_dec(&mvm->queue_sync_counter);
+ }
+
switch (internal_notif->type) {
+ case IWL_MVM_RXQ_EMPTY:
+ break;
case IWL_MVM_RXQ_NOTIF_DEL_BA:
- /* TODO */
+ iwl_mvm_del_ba(mvm, queue, (void *)internal_notif->data);
break;
default:
WARN_ONCE(1, "Invalid identifier %d", internal_notif->type);
}
}
+/*
+ * Returns true if the MPDU was buffered/dropped, false if it should be
+ * passed to the upper layer.
+ */
+static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
+ struct napi_struct *napi,
+ int queue,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ struct iwl_rx_mpdu_desc *desc)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct iwl_mvm_sta *mvm_sta;
+ struct iwl_mvm_baid_data *baid_data;
+ struct iwl_mvm_reorder_buffer *buffer;
+ struct sk_buff *tail;
+ u32 reorder = le32_to_cpu(desc->reorder_data);
+ bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
+ u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+ u8 sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+ int index;
+ u16 nssn, sn;
+ u8 baid;
+
+ baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT;
+
+ if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
+ return false;
+
+ /* no sta yet */
+ if (WARN_ON(IS_ERR_OR_NULL(sta)))
+ return false;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+
+ /* not a data packet */
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return false;
+
+ if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
+ return false;
+
+ baid_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN(!baid_data,
+ "Received baid %d, but no data exists for this BAID\n", baid))
+ return false;
+ if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
+ "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
+ baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
+ tid))
+ return false;
+
+ nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
+ sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
+ IWL_RX_MPDU_REORDER_SN_SHIFT;
+
+ buffer = &baid_data->reorder_buf[queue];
+
+ spin_lock_bh(&buffer->lock);
+
+ /*
+ * If there was a significant jump in the nssn - adjust.
+ * If the SN is smaller than the NSSN it might need to first go into
+	 * the reorder buffer, in which case we just release up to it and the
+	 * rest of the function will take care of storing it and releasing up
+	 * to the nssn.
+ */
+ if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
+ buffer->buf_size)) {
+ u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
+
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
+ }
+
+	/* drop any outdated packets */
+ if (ieee80211_sn_less(sn, buffer->head_sn))
+ goto drop;
+
+ /* release immediately if allowed by nssn and no stored frames */
+ if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
+ if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
+ buffer->buf_size))
+ buffer->head_sn = nssn;
+ /* No need to update AMSDU last SN - we are moving the head */
+ spin_unlock_bh(&buffer->lock);
+ return false;
+ }
+
+ index = sn % buffer->buf_size;
+
+ /*
+	 * Check if we already stored this frame.
+	 * As an A-MSDU is either received in full or not at all, the logic
+	 * is simple: if there are frames in that position in the buffer and
+	 * the last stored A-MSDU had a different SN, this is a
+	 * retransmission. If the SN is the same, it is the same A-MSDU only
+	 * while the subframe index keeps incrementing - otherwise it is a
+	 * retransmission (see the sketch after this function).
+ */
+ tail = skb_peek_tail(&buffer->entries[index]);
+ if (tail && !amsdu)
+ goto drop;
+ else if (tail && (sn != buffer->last_amsdu ||
+ buffer->last_sub_index >= sub_frame_idx))
+ goto drop;
+
+ /* put in reorder buffer */
+ __skb_queue_tail(&buffer->entries[index], skb);
+ buffer->num_stored++;
+ buffer->reorder_time[index] = jiffies;
+
+ if (amsdu) {
+ buffer->last_amsdu = sn;
+ buffer->last_sub_index = sub_frame_idx;
+ }
+
+ iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);
+ spin_unlock_bh(&buffer->lock);
+ return true;
+
+drop:
+ kfree_skb(skb);
+ spin_unlock_bh(&buffer->lock);
+ return true;
+}
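
A condensed sketch of the duplicate test described in the comment above, using the same three inputs the driver consults (slot occupancy, last stored A-MSDU SN, last subframe index); struct and function names are illustrative only:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct slot_state {
	bool occupied;          /* skb_peek_tail() != NULL in the driver */
	uint16_t last_amsdu_sn; /* SN of the last A-MSDU stored here */
	uint8_t last_sub_index; /* its last subframe index */
};

static bool is_duplicate(const struct slot_state *s, bool amsdu,
			 uint16_t sn, uint8_t sub_frame_idx)
{
	if (!s->occupied)
		return false;   /* empty slot: always store */
	if (!amsdu)
		return true;    /* non-A-MSDU landing on an occupied slot */
	/* same A-MSDU only if the SN matches and the subframe index grows */
	return sn != s->last_amsdu_sn || s->last_sub_index >= sub_frame_idx;
}

int main(void)
{
	struct slot_state s = { .occupied = true,
				.last_amsdu_sn = 100, .last_sub_index = 1 };

	assert(is_duplicate(&s, false, 100, 0)); /* retransmission */
	assert(!is_duplicate(&s, true, 100, 2)); /* next subframe, same SN */
	assert(is_duplicate(&s, true, 100, 1));  /* replayed subframe */
	return 0;
}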
+
+static void iwl_mvm_agg_rx_received(struct iwl_mvm *mvm, u8 baid)
+{
+ unsigned long now = jiffies;
+ unsigned long timeout;
+ struct iwl_mvm_baid_data *data;
+
+ rcu_read_lock();
+
+ data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON(!data))
+ goto out;
+
+ if (!data->timeout)
+ goto out;
+
+ timeout = data->timeout;
+ /*
+ * Do not update last rx all the time to avoid cache bouncing
+	 * Do not update last_rx on every frame, to avoid cache-line
+	 * bouncing between the RX queues.
+	 * Update it at most once per timeout. Worst case, the session
+	 * will expire after ~2 * timeout, which doesn't matter that much.
+ if (time_before(data->last_rx + TU_TO_JIFFIES(timeout), now))
+ /* Update is atomic */
+ data->last_rx = now;
+
+out:
+ rcu_read_unlock();
+}
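
The throttling above boils down to a timestamp that is refreshed at most once per timeout interval; a minimal sketch of that pattern (plain C, hypothetical names, microseconds standing in for jiffies/TUs):

#include <stdint.h>

struct ba_activity {
	uint64_t last_rx_us; /* written at most once per interval */
	uint64_t timeout_us; /* BA inactivity budget; 0 = no expiry */
};

/* called on every received frame of the BA session */
static void note_rx(struct ba_activity *a, uint64_t now_us)
{
	if (!a->timeout_us)
		return;
	/* refresh only when a full interval has passed, so the RX
	 * queues don't bounce this cache line on every packet */
	if (now_us > a->last_rx_us + a->timeout_us)
		a->last_rx_us = now_us;
}

int main(void)
{
	struct ba_activity a = { .last_rx_us = 0, .timeout_us = 1000000 };

	note_rx(&a, 1500000); /* > one interval: refreshes */
	note_rx(&a, 1600000); /* within interval: left stale on purpose */
	return (int)(a.last_rx_us != 1500000);
}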
+
void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
@@ -451,8 +769,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
- rx_status->band = desc->channel > 14 ? IEEE80211_BAND_5GHZ :
- IEEE80211_BAND_2GHZ;
+ rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
+ NL80211_BAND_2GHZ;
rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, desc, rx_status);
@@ -479,6 +797,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ u8 baid = (u8)((le32_to_cpu(desc->reorder_data) &
+ IWL_RX_MPDU_REORDER_BAID_MASK) >>
+ IWL_RX_MPDU_REORDER_BAID_SHIFT);
/*
* We have tx blocked stations (with CS bit). If we heard
@@ -531,6 +852,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
+ if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
+ iwl_mvm_agg_rx_received(mvm, baid);
}
/*
@@ -588,12 +911,42 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
/* TODO: PHY info - gscan */
iwl_mvm_create_skb(skb, hdr, len, crypt_len, rxb);
- iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
+ if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
+ iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta);
rcu_read_unlock();
}
-void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm,
+void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_cmd_buffer *rxb, int queue)
{
- /* TODO */
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_frame_release *release = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_reorder_buffer *reorder_buf;
+ struct iwl_mvm_baid_data *ba_data;
+
+ int baid = release->baid;
+
+ if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+ return;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(mvm->baid_map[baid]);
+ if (WARN_ON_ONCE(!ba_data))
+ goto out;
+
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+ goto out;
+
+ reorder_buf = &ba_data->reorder_buf[queue];
+
+ spin_lock_bh(&reorder_buf->lock);
+ iwl_mvm_release_frames(mvm, sta, napi, reorder_buf,
+ le16_to_cpu(release->nssn));
+ spin_unlock_bh(&reorder_buf->lock);
+
+out:
+ rcu_read_unlock();
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 09eb72c4a..e78fc567f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -70,6 +70,7 @@
#include "mvm.h"
#include "fw-api-scan.h"
+#include "iwl-io.h"
#define IWL_DENSE_EBS_SCAN_RATIO 5
#define IWL_SPARSE_EBS_SCAN_RATIO 1
@@ -162,16 +163,16 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
-static __le32 iwl_mvm_scan_rxon_flags(enum ieee80211_band band)
+static __le32 iwl_mvm_scan_rxon_flags(enum nl80211_band band)
{
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
return cpu_to_le32(PHY_BAND_24);
else
return cpu_to_le32(PHY_BAND_5);
}
static inline __le32
-iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
+iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
bool no_cck)
{
u32 tx_ant;
@@ -181,7 +182,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum ieee80211_band band,
mvm->scan_last_antenna_idx);
tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
- if (band == IEEE80211_BAND_2GHZ && !no_cck)
+ if (band == NL80211_BAND_2GHZ && !no_cck)
return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
tx_ant);
else
@@ -398,6 +399,10 @@ void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
ieee80211_scan_completed(mvm->hw,
scan_notif->status == IWL_SCAN_OFFLOAD_ABORTED);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ del_timer(&mvm->scan_timer);
+ } else {
+ IWL_ERR(mvm,
+ "got scan complete notification but no scan is running\n");
}
mvm->last_ebs_successful =
@@ -586,14 +591,14 @@ static void iwl_mvm_scan_fill_tx_cmd(struct iwl_mvm *mvm,
tx_cmd[0].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
TX_CMD_FLG_BT_DIS);
tx_cmd[0].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
no_cck);
tx_cmd[0].sta_id = mvm->aux_sta.sta_id;
tx_cmd[1].tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
TX_CMD_FLG_BT_DIS);
tx_cmd[1].rate_n_flags = iwl_mvm_scan_rate_n_flags(mvm,
- IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ,
no_cck);
tx_cmd[1].sta_id = mvm->aux_sta.sta_id;
}
@@ -690,19 +695,19 @@ iwl_mvm_build_scan_probe(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* Insert ds parameter set element on 2.4 GHz band */
newpos = iwl_mvm_copy_and_insert_ds_elem(mvm,
- ies->ies[IEEE80211_BAND_2GHZ],
- ies->len[IEEE80211_BAND_2GHZ],
+ ies->ies[NL80211_BAND_2GHZ],
+ ies->len[NL80211_BAND_2GHZ],
pos);
params->preq.band_data[0].offset = cpu_to_le16(pos - params->preq.buf);
params->preq.band_data[0].len = cpu_to_le16(newpos - pos);
pos = newpos;
- memcpy(pos, ies->ies[IEEE80211_BAND_5GHZ],
- ies->len[IEEE80211_BAND_5GHZ]);
+ memcpy(pos, ies->ies[NL80211_BAND_5GHZ],
+ ies->len[NL80211_BAND_5GHZ]);
params->preq.band_data[1].offset = cpu_to_le16(pos - params->preq.buf);
params->preq.band_data[1].len =
- cpu_to_le16(ies->len[IEEE80211_BAND_5GHZ]);
- pos += ies->len[IEEE80211_BAND_5GHZ];
+ cpu_to_le16(ies->len[NL80211_BAND_5GHZ]);
+ pos += ies->len[NL80211_BAND_5GHZ];
memcpy(pos, ies->common_ies, ies->common_ie_len);
params->preq.common_data.offset = cpu_to_le16(pos - params->preq.buf);
@@ -916,10 +921,10 @@ static __le32 iwl_mvm_scan_config_rates(struct iwl_mvm *mvm)
unsigned int rates = 0;
int i;
- band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
for (i = 0; i < band->n_bitrates; i++)
rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
- band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
for (i = 0; i < band->n_bitrates; i++)
rates |= rate_to_scan_rate_flag(band->bitrates[i].hw_value);
@@ -934,8 +939,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
struct iwl_scan_config *scan_config;
struct ieee80211_supported_band *band;
int num_channels =
- mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
- mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+ mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels +
+ mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels;
int ret, i, j = 0, cmd_size;
struct iwl_host_cmd cmd = {
.id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
@@ -961,6 +966,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
SCAN_CONFIG_FLAG_SET_TX_CHAINS |
SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+ SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
SCAN_CONFIG_FLAG_SET_ALL_TIMES |
SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
SCAN_CONFIG_FLAG_SET_MAC_ADDR |
@@ -988,10 +994,10 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
IWL_CHANNEL_FLAG_EBS_ADD |
IWL_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
- band = &mvm->nvm_data->bands[IEEE80211_BAND_2GHZ];
+ band = &mvm->nvm_data->bands[NL80211_BAND_2GHZ];
for (i = 0; i < band->n_channels; i++, j++)
scan_config->channel_array[j] = band->channels[i].hw_value;
- band = &mvm->nvm_data->bands[IEEE80211_BAND_5GHZ];
+ band = &mvm->nvm_data->bands[NL80211_BAND_5GHZ];
for (i = 0; i < band->n_channels; i++, j++)
scan_config->channel_array[j] = band->channels[i].hw_value;
@@ -1216,6 +1222,18 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type)
return -EIO;
}
+#define SCAN_TIMEOUT (20 * HZ)
+
+void iwl_mvm_scan_timeout(unsigned long data)
+{
+ struct iwl_mvm *mvm = (struct iwl_mvm *)data;
+
+ IWL_ERR(mvm, "regular scan timed out\n");
+
+ del_timer(&mvm->scan_timer);
+ iwl_force_nmi(mvm->trans);
+}
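
The calls spread across this patch form one watchdog lifecycle: setup_timer() in ops.c, mod_timer() when a regular scan starts, del_timer() on each completion path, del_timer_sync() on teardown, and iwl_force_nmi() if the timer ever fires. A small plain-C sketch of the same arm/disarm/expire flow (deadline in milliseconds; names are illustrative, not driver API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCAN_TIMEOUT_MS (20 * 1000) /* mirrors SCAN_TIMEOUT of 20 * HZ */

struct scan_watchdog {
	bool armed;
	uint64_t deadline_ms;
};

static void scan_started(struct scan_watchdog *w, uint64_t now_ms)
{
	w->armed = true; /* mod_timer() */
	w->deadline_ms = now_ms + SCAN_TIMEOUT_MS;
}

static void scan_completed(struct scan_watchdog *w)
{
	w->armed = false; /* del_timer() */
}

static void watchdog_tick(struct scan_watchdog *w, uint64_t now_ms)
{
	if (w->armed && now_ms >= w->deadline_ms) {
		w->armed = false;
		/* the driver forces an NMI here to kick off recovery */
		fprintf(stderr, "regular scan timed out\n");
	}
}

int main(void)
{
	struct scan_watchdog w = { 0 };

	scan_started(&w, 0);
	watchdog_tick(&w, 10000); /* still within budget: nothing */
	scan_completed(&w);
	watchdog_tick(&w, 30000); /* disarmed: nothing even past it */
	return 0;
}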
+
int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req,
struct ieee80211_scan_ies *ies)
@@ -1295,6 +1313,8 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
+ mod_timer(&mvm->scan_timer, jiffies + SCAN_TIMEOUT);
+
return 0;
}
@@ -1412,6 +1432,7 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_REGULAR) {
ieee80211_scan_completed(mvm->hw, aborted);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ del_timer(&mvm->scan_timer);
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
@@ -1607,6 +1628,7 @@ out:
* to release the scan reference here.
*/
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+ del_timer(&mvm->scan_timer);
if (notify)
ieee80211_scan_completed(mvm->hw, true);
} else if (notify) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
index c2def1232..443a42855 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sf.c
@@ -193,7 +193,7 @@ static void iwl_mvm_fill_sf_command(struct iwl_mvm *mvm,
}
}
- if (sta || IWL_UCODE_API(mvm->fw->ucode_ver) < 13) {
+ if (sta) {
BUILD_BUG_ON(sizeof(sf_full_timeout) !=
sizeof(__le32) * SF_NUM_SCENARIO *
SF_NUM_TIMEOUT_TYPES);
@@ -220,9 +220,6 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
struct ieee80211_sta *sta;
int ret = 0;
- if (IWL_UCODE_API(mvm->fw->ucode_ver) < 13)
- sf_cmd.state = cpu_to_le32(new_state);
-
if (mvm->cfg->disable_dummy_notification)
sf_cmd.state |= cpu_to_le32(SF_CFG_DUMMY_NOTIF_OFF);
@@ -235,8 +232,7 @@ static int iwl_mvm_sf_config(struct iwl_mvm *mvm, u8 sta_id,
switch (new_state) {
case SF_UNINIT:
- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 13)
- iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
+ iwl_mvm_fill_sf_command(mvm, &sf_cmd, NULL);
break;
case SF_FULL_ON:
if (sta_id == IWL_MVM_STATION_COUNT) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ef99942d7..b23ab4a45 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -111,7 +111,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- bool update)
+ bool update, unsigned int flags)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd add_sta_cmd = {
@@ -126,9 +126,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
- if (!update) {
+ if (!update || (flags & STA_MODIFY_QUEUES)) {
add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+ if (flags & STA_MODIFY_QUEUES)
+ add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
}
switch (sta->bandwidth) {
@@ -220,6 +223,39 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
+static void iwl_mvm_rx_agg_session_expired(unsigned long data)
+{
+ struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
+ struct iwl_mvm_baid_data *ba_data;
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvm_sta;
+ unsigned long timeout;
+
+ rcu_read_lock();
+
+ ba_data = rcu_dereference(*rcu_ptr);
+
+ if (WARN_ON(!ba_data))
+ goto unlock;
+
+ if (!ba_data->timeout)
+ goto unlock;
+
+ timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
+ if (time_is_after_jiffies(timeout)) {
+ mod_timer(&ba_data->session_timer, timeout);
+ goto unlock;
+ }
+
+ /* Timer expired */
+ sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
+ sta->addr, ba_data->tid);
+unlock:
+ rcu_read_unlock();
+}
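
This handler implements a sliding inactivity timeout: the RX path only stamps last_rx, and the timer either pushes its own deadline out or declares the session dead. A sketch of that decision (hypothetical names, abstract time units):

#include <stdbool.h>
#include <stdint.h>

struct agg_session {
	uint64_t last_rx;  /* stamped (throttled) on the RX path */
	uint64_t timeout;  /* inactivity budget; 0 disables expiry */
	uint64_t deadline; /* when the handler runs next, if re-armed */
	bool active;
};

static void session_timer_fired(struct agg_session *s, uint64_t now)
{
	if (!s->timeout)
		return; /* no timeout configured */

	uint64_t expires = s->last_rx + 2 * s->timeout;

	if (expires > now) {
		s->deadline = expires; /* traffic was seen: re-arm */
		return;
	}
	/* expired: the driver tears the session down via
	 * ieee80211_stop_rx_ba_session_offl() at this point */
	s->active = false;
}

int main(void)
{
	struct agg_session s = { .last_rx = 90, .timeout = 10, .active = true };

	session_timer_fired(&s, 100); /* 90 + 20 > 100: re-armed for 110 */
	session_timer_fired(&s, 120); /* 90 + 20 <= 120: session expires */
	return s.active;              /* 0 = expired, as expected */
}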
+
static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
@@ -274,6 +310,229 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
}
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, u8 ac, int tid,
+ struct ieee80211_hdr *hdr)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = iwl_mvm_ac_to_tx_fifo[ac],
+ .sta_id = mvmsta->sta_id,
+ .tid = tid,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+ u8 mac_queue = mvmsta->vif->hw_queue[ac];
+ int queue = -1;
+ int ssn;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /*
+ * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+ * exists
+ */
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+ queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_MGMT_QUEUE,
+ IWL_MVM_DQA_MAX_MGMT_QUEUE);
+ if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+ IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
+ queue);
+
+ /* If no such queue is found, we'll use a DATA queue instead */
+ }
+
+ if (queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
+ queue = mvmsta->reserved_queue;
+ IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+ }
+
+ if (queue < 0)
+ queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+
+ /*
+ * Mark TXQ as ready, even though it hasn't been fully configured yet,
+ * to make sure no one else takes it.
+	 * This avoids having to re-acquire the lock at the end of the
+	 * configuration. On error we'll mark it back as free.
+ */
+ if (queue >= 0)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /* TODO: support shared queues for same RA */
+ if (queue < 0)
+ return -ENOSPC;
+
+ /*
+ * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+ * but for configuring the SCD to send A-MPDUs we need to mark the queue
+ * as aggregatable.
+ * Mark all DATA queues as allowing to be aggregated at some point
+	 * Mark all DATA queues as eligible for aggregation at some point.
+ cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Allocating queue #%d to sta %d on tid %d\n",
+ queue, mvmsta->sta_id, tid);
+
+ ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
+ wdg_timeout);
+
+ spin_lock_bh(&mvmsta->lock);
+ mvmsta->tid_data[tid].txq_id = queue;
+ mvmsta->tfd_queue_msk |= BIT(queue);
+
+ if (mvmsta->reserved_queue == queue)
+ mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+ spin_unlock_bh(&mvmsta->lock);
+
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+ if (ret)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
+
+ return ret;
+}
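
The allocation order above - management range first for non-data frames, then the per-station reserved queue, then the general data range - can be summarized in a small sketch; the range constants stand in for the IWL_MVM_DQA_* defines and are purely illustrative:

#include <stdbool.h>

enum {
	MGMT_MIN = 5, MGMT_MAX = 8,   /* stand-ins for the DQA MGMT range */
	DATA_MIN = 10, DATA_MAX = 31, /* stand-ins for the DQA DATA range */
	NO_QUEUE = -1,
};

static unsigned long busy; /* bit i set when queue i is taken */

static int find_free(int lo, int hi)
{
	for (int q = lo; q <= hi; q++)
		if (!(busy & (1UL << q)))
			return q;
	return NO_QUEUE;
}

/* mirrors the fallback order in iwl_mvm_sta_alloc_queue() above */
static int pick_txq(bool mgmt_like, int reserved_queue)
{
	int q = NO_QUEUE;

	if (mgmt_like) /* non-QoS, QoS NDP and MGMT frames */
		q = find_free(MGMT_MIN, MGMT_MAX);
	if (q < 0 && reserved_queue != NO_QUEUE)
		q = reserved_queue;
	if (q < 0)
		q = find_free(DATA_MIN, DATA_MAX);
	return q; /* caller returns -ENOSPC when this is negative */
}

int main(void)
{
	busy = 0xf << MGMT_MIN;                   /* MGMT range exhausted */
	return pick_txq(true, 12) == 12 ? 0 : 1; /* falls back to reservation */
}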
+
+static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
+{
+ if (tid == IWL_MAX_TID_COUNT)
+ return IEEE80211_AC_VO; /* MGMT */
+
+ return tid_to_mac80211_ac[tid];
+}
+
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta, int tid)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+ struct sk_buff *skb;
+ struct ieee80211_hdr *hdr;
+ struct sk_buff_head deferred_tx;
+ u8 mac_queue;
+ bool no_queue = false; /* Marks if there is a problem with the queue */
+ u8 ac;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ skb = skb_peek(&tid_data->deferred_tx_frames);
+ if (!skb)
+ return;
+ hdr = (void *)skb->data;
+
+ ac = iwl_mvm_tid_to_ac_queue(tid);
+ mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+ if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+ iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+ IWL_ERR(mvm,
+ "Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+ mvmsta->sta_id, tid);
+
+ /*
+		 * Mark queue as problematic so the deferred traffic is
+		 * freed later, as we can do nothing with it.
+ */
+ no_queue = true;
+ }
+
+ __skb_queue_head_init(&deferred_tx);
+
+ /* Disable bottom-halves when entering TX path */
+ local_bh_disable();
+ spin_lock(&mvmsta->lock);
+ skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+ spin_unlock(&mvmsta->lock);
+
+ while ((skb = __skb_dequeue(&deferred_tx)))
+ if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+ ieee80211_free_txskb(mvm->hw, skb);
+ local_bh_enable();
+
+ /* Wake queue */
+ iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+ add_stream_wk);
+ struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long deferred_tid_traffic;
+ int sta_id, tid;
+
+ mutex_lock(&mvm->mutex);
+
+ /* Go over all stations with deferred traffic */
+ for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+ IWL_MVM_STATION_COUNT) {
+ clear_bit(sta_id, mvm->sta_deferred_frames);
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+ if (IS_ERR_OR_NULL(sta))
+ continue;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+ for_each_set_bit(tid, &deferred_tid_traffic,
+ IWL_MAX_TID_COUNT + 1)
+ iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+ }
+
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+ struct ieee80211_sta *sta,
+ enum nl80211_iftype vif_type)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int queue;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+
+ /* Make sure we have free resources for this STA */
+ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+ !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+ (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
+ IWL_MVM_QUEUE_FREE))
+ queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+ else
+ queue = iwl_mvm_find_free_queue(mvm, IWL_MVM_DQA_MIN_DATA_QUEUE,
+ IWL_MVM_DQA_MAX_DATA_QUEUE);
+ if (queue < 0) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm, "No available queues for new station\n");
+ return -ENOSPC;
+ }
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ mvmsta->reserved_queue = queue;
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+ queue, mvmsta->sta_id);
+
+ return 0;
+}
+
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -314,18 +573,29 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_tdls_sta_init(mvm, sta);
if (ret)
return ret;
- } else {
+ } else if (!iwl_mvm_is_dqa_supported(mvm)) {
for (i = 0; i < IEEE80211_NUM_ACS; i++)
if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
}
/* for HW restart - reset everything but the sequence number */
- for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
u16 seq = mvm_sta->tid_data[i].seq_number;
memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
mvm_sta->tid_data[i].seq_number = seq;
+
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ continue;
+
+ /*
+ * Mark all queues for this STA as unallocated and defer TX
+ * frames until the queue is allocated
+ */
+ mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
}
+ mvm_sta->deferred_traffic_tid_map = 0;
mvm_sta->agg_tids = 0;
if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -338,7 +608,14 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
mvm_sta->dup_data = dup_data;
}
- ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ ret = iwl_mvm_reserve_sta_stream(mvm, sta,
+ ieee80211_vif_type_p2p(vif));
+ if (ret)
+ goto err;
+ }
+
+ ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
if (ret)
goto err;
@@ -364,7 +641,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
- return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+ return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
}
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
@@ -509,6 +786,26 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
mutex_unlock(&mvm->mutex);
}
+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm_sta *mvm_sta)
+{
+ int ac;
+ int i;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+ if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+ continue;
+
+ ac = iwl_mvm_tid_to_ac_queue(i);
+ iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+ vif->hw_queue[ac], i, 0);
+ mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+ }
+}
+
int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -537,6 +834,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
return ret;
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
+ /* If DQA is supported - the queues can be disabled now */
+ if (iwl_mvm_is_dqa_supported(mvm))
+ iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
/* if we are associated - we can't remove the AP STA now */
if (vif->bss_conf.assoc)
return ret;
@@ -750,6 +1051,33 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
lockdep_assert_held(&mvm->mutex);
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ struct iwl_trans_txq_scd_cfg cfg = {
+ .fifo = IWL_MVM_TX_FIFO_VO,
+ .sta_id = mvmvif->bcast_sta.sta_id,
+ .tid = IWL_MAX_TID_COUNT,
+ .aggregate = false,
+ .frame_limit = IWL_FRAME_LIMIT,
+ };
+ unsigned int wdg_timeout =
+ iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+ int queue;
+
+ if ((vif->type == NL80211_IFTYPE_AP) &&
+ (mvmvif->bcast_sta.tfd_queue_msk &
+ BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
+ queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
+ (mvmvif->bcast_sta.tfd_queue_msk &
+ BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
+ queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ else if (WARN(1, "Missed required TXQ for adding bcast STA\n"))
+ return -EINVAL;
+
+ iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
+ wdg_timeout);
+ }
+
if (vif->type == NL80211_IFTYPE_ADHOC)
baddr = vif->bss_conf.bssid;
@@ -778,20 +1106,28 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- u32 qmask;
+ u32 qmask = 0;
lockdep_assert_held(&mvm->mutex);
- qmask = iwl_mvm_mac_get_queues_mask(vif);
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ qmask = iwl_mvm_mac_get_queues_mask(vif);
- /*
- * The firmware defines the TFD queue mask to only be relevant
- * for *unicast* queues, so the multicast (CAB) queue shouldn't
- * be included.
- */
- if (vif->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * The firmware defines the TFD queue mask to only be relevant
+ * for *unicast* queues, so the multicast (CAB) queue shouldn't
+ * be included.
+ */
qmask &= ~BIT(vif->cab_queue);
+ if (iwl_mvm_is_dqa_supported(mvm))
+ qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
+ } else if (iwl_mvm_is_dqa_supported(mvm) &&
+ vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
+ }
+
return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
ieee80211_vif_type_p2p(vif));
}
@@ -849,11 +1185,92 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#define IWL_MAX_RX_BA_SESSIONS 16
+static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
+{
+ struct iwl_mvm_delba_notif notif = {
+ .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
+ .metadata.sync = 1,
+ .delba.baid = baid,
+ };
+ iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+}
+
+static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
+ struct iwl_mvm_baid_data *data)
+{
+ int i;
+
+ iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
+
+ for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+ int j;
+ struct iwl_mvm_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+
+ spin_lock_bh(&reorder_buf->lock);
+ if (likely(!reorder_buf->num_stored)) {
+ spin_unlock_bh(&reorder_buf->lock);
+ continue;
+ }
+
+ /*
+ * This shouldn't happen in regular DELBA since the internal
+ * delBA notification should trigger a release of all frames in
+ * the reorder buffer.
+ */
+ WARN_ON(1);
+
+ for (j = 0; j < reorder_buf->buf_size; j++)
+ __skb_queue_purge(&reorder_buf->entries[j]);
+ /*
+		 * Prevent timer re-arm. This prevents a very far-fetched case
+ * where we timed out on the notification. There may be prior
+ * RX frames pending in the RX queue before the notification
+ * that might get processed between now and the actual deletion
+ * and we would re-arm the timer although we are deleting the
+ * reorder buffer.
+ */
+ reorder_buf->removed = true;
+ spin_unlock_bh(&reorder_buf->lock);
+ del_timer_sync(&reorder_buf->reorder_timer);
+ }
+}
+
+static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
+ u32 sta_id,
+ struct iwl_mvm_baid_data *data,
+ u16 ssn, u8 buf_size)
+{
+ int i;
+
+ for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+ struct iwl_mvm_reorder_buffer *reorder_buf =
+ &data->reorder_buf[i];
+ int j;
+
+ reorder_buf->num_stored = 0;
+ reorder_buf->head_sn = ssn;
+ reorder_buf->buf_size = buf_size;
+ /* rx reorder timer */
+ reorder_buf->reorder_timer.function =
+ iwl_mvm_reorder_timer_expired;
+ reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
+ init_timer(&reorder_buf->reorder_timer);
+ spin_lock_init(&reorder_buf->lock);
+ reorder_buf->mvm = mvm;
+ reorder_buf->queue = i;
+ reorder_buf->sta_id = sta_id;
+ for (j = 0; j < reorder_buf->buf_size; j++)
+ __skb_queue_head_init(&reorder_buf->entries[j]);
+ }
+}
+
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size)
+ int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
+ struct iwl_mvm_baid_data *baid_data = NULL;
int ret;
u32 status;
@@ -864,6 +1281,19 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return -ENOSPC;
}
+ if (iwl_mvm_has_new_rx_api(mvm) && start) {
+ /*
+ * Allocate here so if allocation fails we can bail out early
+ * before starting the BA session in the firmware
+ */
+ baid_data = kzalloc(sizeof(*baid_data) +
+ mvm->trans->num_rx_queues *
+ sizeof(baid_data->reorder_buf[0]),
+ GFP_KERNEL);
+ if (!baid_data)
+ return -ENOMEM;
+ }
+
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
@@ -882,7 +1312,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
iwl_mvm_add_sta_cmd_size(mvm),
&cmd, &status);
if (ret)
- return ret;
+ goto out_free;
switch (status & IWL_ADD_STA_STATUS_MASK) {
case ADD_STA_SUCCESS:
@@ -900,14 +1330,75 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
- if (!ret) {
- if (start)
- mvm->rx_ba_sessions++;
- else if (mvm->rx_ba_sessions > 0)
- /* check that restart flow didn't zero the counter */
- mvm->rx_ba_sessions--;
+ if (ret)
+ goto out_free;
+
+ if (start) {
+ u8 baid;
+
+ mvm->rx_ba_sessions++;
+
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return 0;
+
+ if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
+ IWL_ADD_STA_BAID_SHIFT);
+ baid_data->baid = baid;
+ baid_data->timeout = timeout;
+ baid_data->last_rx = jiffies;
+ init_timer(&baid_data->session_timer);
+ baid_data->session_timer.function =
+ iwl_mvm_rx_agg_session_expired;
+ baid_data->session_timer.data =
+ (unsigned long)&mvm->baid_map[baid];
+ baid_data->mvm = mvm;
+ baid_data->tid = tid;
+ baid_data->sta_id = mvm_sta->sta_id;
+
+ mvm_sta->tid_to_baid[tid] = baid;
+ if (timeout)
+ mod_timer(&baid_data->session_timer,
+ TU_TO_EXP_TIME(timeout * 2));
+
+ iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
+ baid_data, ssn, buf_size);
+ /*
+		 * protect the BA data with RCU to cover a case where our
+		 * internal RX sync mechanism times out (not that it's
+		 * supposed to happen) and we free the session data while
+		 * RX is being processed in parallel.
+ */
+ WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
+ rcu_assign_pointer(mvm->baid_map[baid], baid_data);
+ } else if (mvm->rx_ba_sessions > 0) {
+ u8 baid = mvm_sta->tid_to_baid[tid];
+
+ /* check that restart flow didn't zero the counter */
+ mvm->rx_ba_sessions--;
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return 0;
+
+ if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+ return -EINVAL;
+
+ baid_data = rcu_access_pointer(mvm->baid_map[baid]);
+ if (WARN_ON(!baid_data))
+ return -EINVAL;
+
+ /* synchronize all rx queues so we can safely delete */
+ iwl_mvm_free_reorder(mvm, baid_data);
+ del_timer_sync(&baid_data->session_timer);
+ RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
+ kfree_rcu(baid_data, rcu_head);
}
+ return 0;
+out_free:
+ kfree(baid_data);
return ret;
}
@@ -925,7 +1416,9 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
mvm_sta->tfd_queue_msk |= BIT(queue);
mvm_sta->tid_disable_agg &= ~BIT(tid);
} else {
- mvm_sta->tfd_queue_msk &= ~BIT(queue);
+ /* In DQA-mode the queue isn't removed on agg termination */
+ if (!iwl_mvm_is_dqa_supported(mvm))
+ mvm_sta->tfd_queue_msk &= ~BIT(queue);
mvm_sta->tid_disable_agg |= BIT(tid);
}
@@ -1008,17 +1501,35 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
spin_lock_bh(&mvm->queue_info_lock);
- txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
- mvm->last_agg_queue);
- if (txq_id < 0) {
- ret = txq_id;
- spin_unlock_bh(&mvm->queue_info_lock);
- IWL_ERR(mvm, "Failed to allocate agg queue\n");
- goto release_locks;
+ /*
+ * Note the possible cases:
+ * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
+ * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
+ * one and mark it as reserved
+ * 3. In DQA mode, but no traffic yet on this TID: same treatment as in
+ * non-DQA mode, since the TXQ hasn't yet been allocated
+ */
+ txq_id = mvmsta->tid_data[tid].txq_id;
+ if (!iwl_mvm_is_dqa_supported(mvm) ||
+ mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
+ txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
+ mvm->last_agg_queue);
+ if (txq_id < 0) {
+ ret = txq_id;
+ spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_ERR(mvm, "Failed to allocate agg queue\n");
+ goto release_locks;
+ }
+
+ /* TXQ hasn't yet been enabled, so mark it only as reserved */
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
}
- mvm->queue_info[txq_id].setup_reserved = true;
spin_unlock_bh(&mvm->queue_info_lock);
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "AGG for tid %d will be on queue #%d\n",
+ tid, txq_id);
+
tid_data = &mvmsta->tid_data[tid];
tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
tid_data->txq_id = txq_id;
@@ -1053,6 +1564,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
unsigned int wdg_timeout =
iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
int queue, ret;
+ bool alloc_queue = true;
u16 ssn;
struct iwl_trans_txq_scd_cfg cfg = {
@@ -1078,8 +1590,46 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
- iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]],
- ssn, &cfg, wdg_timeout);
+ /* In DQA mode, the existing queue might need to be reconfigured */
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ spin_lock_bh(&mvm->queue_info_lock);
+ /* Maybe there is no need to even alloc a queue... */
+ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
+ alloc_queue = false;
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ /*
+ * Only reconfig the SCD for the queue if the window size has
+ * changed from current (become smaller)
+ */
+ if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
+ /*
+ * If reconfiguring an existing queue, it first must be
+ * drained
+ */
+ ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
+ BIT(queue));
+ if (ret) {
+ IWL_ERR(mvm,
+ "Error draining queue before reconfig\n");
+ return ret;
+ }
+
+ ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
+ mvmsta->sta_id, tid,
+ buf_size, ssn);
+ if (ret) {
+ IWL_ERR(mvm,
+ "Error reconfiguring TXQ #%d\n", queue);
+ return ret;
+ }
+ }
+ }
+
+ if (alloc_queue)
+ iwl_mvm_enable_txq(mvm, queue,
+ vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
+ &cfg, wdg_timeout);
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@@ -1087,7 +1637,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[queue].setup_reserved = false;
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
spin_unlock_bh(&mvm->queue_info_lock);
/*
@@ -1134,9 +1684,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid);
- /* No need to mark as reserved anymore */
spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[txq_id].setup_reserved = false;
+ /*
+	 * The TXQ is marked as reserved only if no traffic came through yet.
+	 * This means no traffic has been sent on this TID (agg'd or not), so
+	 * we no longer have use for the queue. Since it hasn't even been
+	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
+	 * free.
+ */
+ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock);
switch (tid_data->state) {
@@ -1162,9 +1719,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
- iwl_mvm_disable_txq(mvm, txq_id,
- vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
- 0);
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+ iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
+ }
return 0;
case IWL_AGG_STARTING:
case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1215,9 +1774,16 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
mvmsta->agg_tids &= ~BIT(tid);
spin_unlock_bh(&mvmsta->lock);
- /* No need to mark as reserved */
spin_lock_bh(&mvm->queue_info_lock);
- mvm->queue_info[txq_id].setup_reserved = false;
+ /*
+	 * The TXQ is marked as reserved only if no traffic came through yet.
+	 * This means no traffic has been sent on this TID (agg'd or not), so
+	 * we no longer have use for the queue. Since it hasn't even been
+	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
+	 * free.
+ */
+ if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock);
if (old_state >= IWL_AGG_ON) {
@@ -1230,9 +1796,12 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
- iwl_mvm_disable_txq(mvm, tid_data->txq_id,
- vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
- 0);
+ if (!iwl_mvm_is_dqa_supported(mvm)) {
+ int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+ iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
+ tid, 0);
+ }
}
return 0;
@@ -1285,6 +1854,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex));
+
/*
* It is possible that the 'sta' parameter is NULL,
* for example when a GTK is removed - the sta_id will then
@@ -1391,6 +1961,14 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
struct ieee80211_key_seq seq;
const u8 *pn;
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+ break;
+ default:
+ return -EINVAL;
+ }
+
memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
ieee80211_get_key_rx_seq(keyconf, 0, &seq);
pn = seq.aes_cmac.pn;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 1a8f69a41..d2c58f134 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -7,7 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -34,7 +34,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright(c) 2015 Intel Deutschland GmbH
+ * Copyright(c) 2015 - 2016 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -80,6 +80,60 @@ struct iwl_mvm;
struct iwl_mvm_vif;
/**
+ * DOC: DQA - Dynamic Queue Allocation - introduction
+ *
+ * Dynamic Queue Allocation (AKA "DQA") is a feature implemented in the
+ * iwlwifi driver to allow dynamic allocation of queues on demand, rather
+ * than allocating them statically ahead of time. Ideally, we would like
+ * to allocate one queue
+ * per RA/TID, thus allowing an AP - for example - to send BE traffic to STA2
+ * even if it also needs to send traffic to a sleeping STA1, without being
+ * blocked by the sleeping station.
+ *
+ * Although the queues in DQA mode are dynamically allocated, there are still
+ * some queues that are statically allocated:
+ * TXQ #0 - command queue
+ * TXQ #1 - aux frames
+ * TXQ #2 - P2P device frames
+ * TXQ #3 - P2P GO/SoftAP GCAST/BCAST frames
+ * TXQ #4 - BSS DATA frames queue
+ * TXQ #5-8 - Non-QoS and MGMT frames queue pool
+ * TXQ #9 - P2P GO/SoftAP probe responses
+ * TXQ #10-31 - DATA frames queue pool
+ * The queues are dynamically taken from either the MGMT frames queue pool or
+ * the DATA frames one. See %iwl_mvm_dqa_txq for more information on every
+ * queue.
+ *
+ * When a frame for a previously unseen RA/TID comes in, it needs to be deferred
+ * until a queue is allocated for it, and only then can it be TXed. Therefore, it
+ * is placed into %iwl_mvm_tid_data.deferred_tx_frames, and a worker called
+ * %mvm->add_stream_wk later allocates the queues and TXes the deferred frames.
+ *
+ * For convenience, MGMT is considered as if it has TID=8, and goes to the MGMT
+ * queues in the pool. If there is no longer a free MGMT queue to allocate, a
+ * queue will be allocated from the DATA pool instead. Since QoS NDPs can create
+ * a problem for aggregations, they too will use a MGMT queue.
+ *
+ * When adding a STA, a DATA queue is reserved for it so that it can TX from
+ * it. If no free queue is available to reserve, the STA addition will fail.
+ *
+ * If the DATA queue pool gets exhausted, no new STA will be accepted, and if a
+ * new RA/TID comes in for an existing STA, one of the STA's queues will become
+ * shared and will serve more than the single TID (but always for the same RA!).
+ *
+ * When a RA/TID needs to become aggregated, no new queue needs to be
+ * allocated; the queue is simply marked as aggregated via the ADD_STA
+ * command. Note, however, that a shared queue cannot be aggregated:
+ * only after the other TIDs become inactive and are removed can the
+ * queue be reconfigured and become aggregated.
+ *
+ * When removing a station, its queues are returned to the pool for reuse. Here
+ * we also need to make sure that we are synced with the worker thread that TXes
+ * the deferred frames so we don't get into a situation where the queues are
+ * removed and then the worker puts deferred frames onto the released queues or
+ * tries to allocate new queues for a STA we don't need anymore.
+ */
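Read together with %iwl_mvm_dqa_txq, the fixed part of this layout maps to queue numbers as follows. A sketch mirroring the table above: only the IWL_MVM_DQA_*_QUEUE names that appear elsewhere in this patch are certain, the remaining names and values simply follow the documented TXQ numbering.

enum iwl_mvm_dqa_txq_sketch {
	IWL_MVM_DQA_CMD_QUEUE		= 0,	/* command queue */
	IWL_MVM_DQA_AUX_QUEUE		= 1,	/* aux frames */
	IWL_MVM_DQA_P2P_DEVICE_QUEUE	= 2,	/* P2P device frames */
	IWL_MVM_DQA_GCAST_QUEUE		= 3,	/* P2P GO/SoftAP GCAST/BCAST */
	IWL_MVM_DQA_BSS_CLIENT_QUEUE	= 4,	/* BSS DATA frames */
	IWL_MVM_DQA_MIN_MGMT_QUEUE	= 5,	/* first of the MGMT pool */
	IWL_MVM_DQA_MAX_MGMT_QUEUE	= 8,	/* last of the MGMT pool */
	IWL_MVM_DQA_AP_PROBE_RESP_QUEUE	= 9,	/* P2P GO/SoftAP probe resp */
	IWL_MVM_DQA_MIN_DATA_QUEUE	= 10,	/* first of the DATA pool */
	IWL_MVM_DQA_MAX_DATA_QUEUE	= 31,	/* last of the DATA pool */
};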
+
+/**
* DOC: station table - introduction
*
 * The station table is a list of data structures that represent the stations.
@@ -253,6 +307,7 @@ enum iwl_mvm_agg_state {
/**
* struct iwl_mvm_tid_data - holds the states for each RA / TID
+ * @deferred_tx_frames: deferred TX frames for this RA/TID
* @seq_number: the next WiFi sequence number to use
* @next_reclaimed: the WiFi sequence number of the next packet to be acked.
* This is basically (last acked packet++).
@@ -260,7 +315,7 @@ enum iwl_mvm_agg_state {
* Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA).
* @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed.
* @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session
+ * @txq_id: Tx queue used by the BA session / DQA
* @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
* the first packet to be sent in legacy HW queue in Tx AGG stop flow.
* Basically when next_reclaimed reaches ssn, we can tell mac80211 that
@@ -268,6 +323,7 @@ enum iwl_mvm_agg_state {
* @tx_time: medium time consumed by this A-MPDU
*/
struct iwl_mvm_tid_data {
+ struct sk_buff_head deferred_tx_frames;
u16 seq_number;
u16 next_reclaimed;
/* The rest is Tx AGG related */
@@ -292,6 +348,15 @@ struct iwl_mvm_key_pn {
} ____cacheline_aligned_in_smp q[];
};
+struct iwl_mvm_delba_data {
+ u32 baid;
+} __packed;
+
+struct iwl_mvm_delba_notif {
+ struct iwl_mvm_internal_rxq_notif metadata;
+ struct iwl_mvm_delba_data delba;
+} __packed;
+
/**
* struct iwl_mvm_rxq_dup_data - per station per rx queue data
* @last_seq: last sequence per tid for duplicate packet detection
@@ -316,7 +381,11 @@ struct iwl_mvm_rxq_dup_data {
* we need to signal the EOSP
* @lock: lock to protect the whole struct. Since %tid_data is access from Tx
* and from Tx response flow, it needs a spinlock.
- * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
+ * @tid_data: per tid data + mgmt. Look at %iwl_mvm_tid_data.
+ * @tid_to_baid: a simple map of TID to baid
+ * @reserved_queue: the queue reserved for this STA for DQA purposes
+ *	Every STA is given one reserved queue to allow it to operate. If no
+ * such queue can be guaranteed, the STA addition will fail.
* @tx_protection: reference counter for controlling the Tx protection.
 * @tt_tx_protection: is Tx protection enabled due to thermal throttling?
* @disable_tx: is tx to this STA disabled?
@@ -329,6 +398,7 @@ struct iwl_mvm_rxq_dup_data {
* the BA window. To be used for UAPSD only.
* @ptk_pn: per-queue PTK PN data structures
* @dup_data: per queue duplicate packet detection data
+ * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -345,12 +415,17 @@ struct iwl_mvm_sta {
bool bt_reduced_txpower;
bool next_status_eosp;
spinlock_t lock;
- struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
+ struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT + 1];
+ u8 tid_to_baid[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
struct ieee80211_vif *vif;
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;
+ u16 deferred_traffic_tid_map;
+
+ u8 reserved_queue;
+
/* Temporary, until the new TLC will control the Tx protection */
s8 tx_protection;
bool tt_tx_protection;
@@ -378,8 +453,18 @@ struct iwl_mvm_int_sta {
u32 tfd_queue_msk;
};
+/**
+ * Send the STA info to the FW.
+ *
+ * @mvm: the iwl_mvm* to use
+ * @sta: the STA
+ * @update: this is true if the FW is being updated about a STA it already knows
+ * about. Otherwise (if this is a new STA), this should be false.
+ * @flags: if update==true, this marks what is being changed via ORs of values
+ * from enum iwl_sta_modify_flag. Otherwise, this is ignored.
+ */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- bool update);
+ bool update, unsigned int flags);
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -413,7 +498,7 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
- int tid, u16 ssn, bool start, u8 buf_size);
+ int tid, u16 ssn, bool start, u8 buf_size, u16 timeout);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -459,5 +544,6 @@ void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
bool disable);
void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk);
#endif /* __sta_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index 18711c5de..9f160fc58 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -444,7 +444,7 @@ iwl_mvm_tdls_config_channel_switch(struct iwl_mvm *mvm,
}
if (chandef) {
- cmd.ci.band = (chandef->chan->band == IEEE80211_BAND_2GHZ ?
+ cmd.ci.band = (chandef->chan->band == NL80211_BAND_2GHZ ?
PHY_BAND_24 : PHY_BAND_5);
cmd.ci.channel = chandef->chan->hw_value;
cmd.ci.width = iwl_mvm_get_channel_width(chandef);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index f1f28255a..58fc7b3c7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -204,20 +204,11 @@ void iwl_mvm_temp_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (WARN_ON(ths_crossed >= IWL_MAX_DTS_TRIPS))
return;
- /*
- * We are now handling a temperature notification from the firmware
- * in ASYNC and hold the mutex. thermal_notify_framework will call
- * us back through get_temp() which ought to send a SYNC command to
- * the firmware and hence to take the mutex.
- * Avoid the deadlock by unlocking the mutex here.
- */
if (mvm->tz_device.tzone) {
struct iwl_mvm_thermal_device *tz_dev = &mvm->tz_device;
- mutex_unlock(&mvm->mutex);
thermal_notify_framework(tz_dev->tzone,
tz_dev->fw_trips_index[ths_crossed]);
- mutex_lock(&mvm->mutex);
}
#endif /* CONFIG_THERMAL */
}
@@ -368,16 +359,14 @@ static void iwl_mvm_tt_smps_iterator(void *_data, u8 *mac,
static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
{
- struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
int i, err;
for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(sta))
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, i);
+ if (!mvmsta)
continue;
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
if (enable == mvmsta->tt_tx_protection)
continue;
err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
@@ -796,9 +785,6 @@ static int iwl_mvm_tcool_get_cur_state(struct thermal_cooling_device *cdev,
{
struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
- if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
- return -EBUSY;
-
*state = mvm->cooling_dev.cur_state;
return 0;
@@ -813,9 +799,6 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR))
return -EIO;
- if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
- return -EBUSY;
-
mutex_lock(&mvm->mutex);
if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 34731e29c..779bafcbc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -67,6 +67,7 @@
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
+#include <net/ipv6.h>
#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
@@ -98,6 +99,111 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
addr, tid, ssn);
}
+#define OPT_HDR(type, skb, off) \
+ (type *)(skb_network_header(skb) + (off))
+
+static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd)
+{
+#if IS_ENABLED(CONFIG_INET)
+ u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+ u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
+ u8 protocol = 0;
+
+ /*
+ * Do not compute checksum if already computed or if transport will
+ * compute it
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
+ return;
+
+ /* We do not expect to be requested to csum stuff we do not support */
+ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
+ (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)),
+ "No support for requested checksum\n")) {
+ skb_checksum_help(skb);
+ return;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ protocol = ip_hdr(skb)->protocol;
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ipv6h =
+ (struct ipv6hdr *)skb_network_header(skb);
+ unsigned int off = sizeof(*ipv6h);
+
+ protocol = ipv6h->nexthdr;
+ while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+ /* only supported extension headers */
+ if (protocol != NEXTHDR_ROUTING &&
+ protocol != NEXTHDR_HOP &&
+ protocol != NEXTHDR_DEST &&
+ protocol != NEXTHDR_FRAGMENT) {
+ skb_checksum_help(skb);
+ return;
+ }
+
+ if (protocol == NEXTHDR_FRAGMENT) {
+ struct frag_hdr *hp =
+ OPT_HDR(struct frag_hdr, skb, off);
+
+ protocol = hp->nexthdr;
+ off += sizeof(struct frag_hdr);
+ } else {
+ struct ipv6_opt_hdr *hp =
+ OPT_HDR(struct ipv6_opt_hdr, skb, off);
+
+ protocol = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ }
+ }
+		/* if we get here, the protocol should now be TCP or UDP */
+#endif
+ }
+
+ if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+ WARN_ON_ONCE(1);
+ skb_checksum_help(skb);
+ return;
+ }
+
+ /* enable L4 csum */
+ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+ /*
+	 * Set the offset to the IP header (after the SNAP header).
+	 * We don't support tunneling, so there is no need to handle an
+	 * inner header. The size is in words.
+ */
+ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
+ if (skb->protocol == htons(ETH_P_IP) &&
+ (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
+ ip_hdr(skb)->check = 0;
+ offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+ }
+
+ /* reset UDP/TCP header csum */
+ if (protocol == IPPROTO_TCP)
+ tcp_hdr(skb)->check = 0;
+ else
+ udp_hdr(skb)->check = 0;
+
+ /* mac header len should include IV, size is in words */
+ if (info->control.hw_key)
+ mh_len += info->control.hw_key->iv_len;
+ mh_len /= 2;
+ offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+ tx_cmd->offload_assist = cpu_to_le16(offload_assist);
+#endif
+}
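To make the bit assembly above concrete, here is a hedged worked example for a plain (unencrypted, non-A-MSDU) IPv4/TCP QoS data frame, reusing the TX_CMD_OFFLD_* fields from this function; the 26-byte QoS MAC header becomes 13 words:

/* Worked example (sketch): offload_assist for an unencrypted
 * IPv4/TCP QoS data frame, no A-MSDU, no IV. */
u16 oa = 0;

oa |= BIT(TX_CMD_OFFLD_L4_EN);		/* enable TCP checksum */
oa |= 4 << TX_CMD_OFFLD_IP_HDR;		/* IP header offset (SNAP), in words */
oa |= (26 / 2) << TX_CMD_OFFLD_MH_SIZE;	/* 26-byte MAC header = 13 words */
/* TX_CMD_OFFLD_L3_EN stays clear: IPv4 csum is offloaded for A-MSDUs only */
tx_cmd->offload_assist = cpu_to_le16(oa);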
+
/*
* Sets most of the Tx cmd's fields
*/
@@ -127,6 +233,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+ tx_cmd->offload_assist |=
+ cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
@@ -187,9 +296,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
/* Total # bytes to be transmitted */
tx_cmd->len = cpu_to_le16((u16)skb->len +
(uintptr_t)skb_info->driver_data[0]);
- tx_cmd->next_frame_len = 0;
tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_cmd->sta_id = sta_id;
+
+ /* padding is inserted later in transport */
+ if (ieee80211_hdrlen(fc) % 4 &&
+ !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
+ tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
+
+ iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
}
/*
@@ -245,7 +360,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
&mvm->nvm_data->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
/* For 2.4 GHZ band, check that there is no need to remap */
@@ -258,7 +373,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->mgmt_last_antenna_idx);
- if (info->band == IEEE80211_BAND_2GHZ &&
+ if (info->band == NL80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
else
@@ -360,6 +475,21 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
return dev_cmd;
}
+static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info, __le16 fc)
+{
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (info->control.vif->type == NL80211_IFTYPE_AP &&
+ ieee80211_is_probe_resp(fc))
+ return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ else if (ieee80211_is_mgmt(fc) &&
+ info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ }
+
+ return info->hw_queue;
+}
+
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -369,6 +499,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct iwl_tx_cmd *tx_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ int queue;
memcpy(&info, skb->cb, sizeof(info));
@@ -393,6 +524,8 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
info.control.vif->type == NL80211_IFTYPE_STATION)
IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+ queue = info.hw_queue;
+
/*
* If the interface on which the frame is sent is the P2P_DEVICE
* or an AP/GO interface use the broadcast station associated
@@ -408,10 +541,12 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
iwl_mvm_vif_from_mac80211(info.control.vif);
if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
- info.control.vif->type == NL80211_IFTYPE_AP)
+ info.control.vif->type == NL80211_IFTYPE_AP) {
sta_id = mvmvif->bcast_sta.sta_id;
- else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
- is_multicast_ether_addr(hdr->addr1)) {
+ queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
+ hdr->frame_control);
+ } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
+ is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
if (ap_sta_id != IWL_MVM_STATION_COUNT)
@@ -419,7 +554,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
}
}
- IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue);
+ IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
if (!dev_cmd)
@@ -430,7 +565,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
- if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) {
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
return -1;
}
@@ -463,6 +598,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
u16 amsdu_add, snap_ip_tcp, pad, i = 0;
unsigned int dbg_max_amsdu_len;
+ netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 *qc, tid, txf;
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
@@ -473,15 +609,30 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
+ dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+
if (!sta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
- !mvmsta->tlc_amsdu) {
+ (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
num_subframes = 1;
pad = 0;
goto segment;
}
/*
+	 * Do not build an AMSDU for IPv6 with extension headers;
+	 * ask the stack to segment and checksum the generated MPDUs for us.
+ */
+ if (skb->protocol == htons(ETH_P_IPV6) &&
+ ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+ IPPROTO_TCP) {
+ num_subframes = 1;
+ pad = 0;
+ netdev_features &= ~NETIF_F_CSUM_MASK;
+ goto segment;
+ }
+
+ /*
* No need to lock amsdu_in_ampdu_allowed since it can't be modified
 	 * during a BA session.
*/
@@ -493,7 +644,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
max_amsdu_len = sta->max_amsdu_len;
- dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
/* the Tx FIFO to which this A-MSDU will be routed */
txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
@@ -507,7 +657,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
mvm->shared_mem_cfg.txfifo_size[txf] - 256);
- if (dbg_max_amsdu_len)
+ if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
dbg_max_amsdu_len);
@@ -575,7 +725,7 @@ segment:
skb_shinfo(skb)->gso_size = num_subframes * mss;
memcpy(cb, skb->cb, sizeof(cb));
- next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
+ next = skb_gso_segment(skb, netdev_features);
skb_shinfo(skb)->gso_size = mss;
if (WARN_ON_ONCE(IS_ERR(next)))
return -EINVAL;
@@ -641,6 +791,35 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
#endif
+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta, u8 tid,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u8 mac_queue = info->hw_queue;
+ struct sk_buff_head *deferred_tx_frames;
+
+ lockdep_assert_held(&mvm_sta->lock);
+
+ mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+ set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+ deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+ skb_queue_tail(deferred_tx_frames, skb);
+
+ /*
+ * The first deferred frame should've stopped the MAC queues, so we
+ * should never get a second deferred frame for the RA/TID.
+ */
+ if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
+ "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
+ skb_queue_len(deferred_tx_frames))) {
+ iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+ schedule_work(&mvm->add_stream_wk);
+ }
+}
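The matching drain side lives in %iwl_mvm_add_new_dqa_stream_wk, whose body is not part of this hunk. A hypothetical sketch of what it has to do for each deferred RA/TID, assuming iwl_mvm_start_mac_queues() is the counterpart of the stop call above and that the worker has already allocated the TXQ:

/* Hypothetical sketch of the worker's per-TID drain; not the literal
 * iwl_mvm_add_new_dqa_stream_wk. Caller holds mvmsta->lock as needed. */
static void iwl_mvm_drain_deferred_tx_sketch(struct iwl_mvm *mvm,
					     struct ieee80211_sta *sta,
					     u8 tid, u8 mac_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct sk_buff *skb;

	/* assumes tid_data[tid].txq_id has been allocated by now */
	while ((skb = __skb_dequeue(&mvmsta->tid_data[tid].deferred_tx_frames)))
		iwl_mvm_tx_skb(mvm, skb, sta);

	mvmsta->deferred_traffic_tid_map &= ~BIT(tid);
	/* undo the iwl_mvm_stop_mac_queues() done when deferring */
	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
}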
+
/*
* Sets the fields in the Tx cmd that are crypto related
*/
@@ -656,7 +835,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
u8 txq_id = info->hw_queue;
- bool is_data_qos = false, is_ampdu = false;
+ bool is_ampdu = false;
int hdrlen;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -697,8 +876,15 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
- is_data_qos = true;
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+ } else if (iwl_mvm_is_dqa_supported(mvm) &&
+ (ieee80211_is_qos_nullfunc(fc) ||
+ ieee80211_is_nullfunc(fc))) {
+ /*
+		 * nullfunc frames should go to the MGMT queue regardless of QoS
+ */
+ tid = IWL_MAX_TID_COUNT;
+ txq_id = mvmsta->tid_data[tid].txq_id;
}
/* Copy MAC header from skb into command buffer */
@@ -719,18 +905,36 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->tid_data[tid].txq_id;
}
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (unlikely(mvmsta->tid_data[tid].txq_id ==
+ IEEE80211_INVAL_HW_QUEUE)) {
+ iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+ /*
+			 * The frame is now deferred, and the scheduled worker
+			 * will re-allocate it, so we can free it for now.
+ */
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+ return 0;
+ }
+
+ txq_id = mvmsta->tid_data[tid].txq_id;
+ }
+
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
- if (is_data_qos && !ieee80211_has_morefrags(fc))
+ if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
spin_unlock(&mvmsta->lock);
- if (txq_id < mvm->first_agg_queue)
+ /* Increase pending frames count if this isn't AMPDU */
+ if (!is_ampdu)
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
@@ -883,7 +1087,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
#endif /* CONFIG_IWLWIFI_DEBUG */
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ieee80211_tx_rate *r)
{
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
@@ -978,6 +1182,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
u8 skb_freed = 0;
u16 next_reclaimed, seq_ctl;
bool is_ndp = false;
+ bool txq_agg = false; /* Is this TXQ aggregated */
__skb_queue_head_init(&skbs);
@@ -1108,6 +1313,8 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
bool send_eosp_ndp = false;
spin_lock_bh(&mvmsta->lock);
+ txq_agg = (mvmsta->tid_data[tid].state == IWL_AGG_ON);
+
if (!is_ndp) {
tid_data->next_reclaimed = next_reclaimed;
IWL_DEBUG_TX_REPLY(mvm,
@@ -1163,11 +1370,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id >= mvm->first_agg_queue)
+ if (txq_agg)
goto out;
/* We can't free more than one frame at once on a shared queue */
- WARN_ON(skb_freed > 1);
+ WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
/* If we have still frames for this STA nothing to do here */
if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
@@ -1261,9 +1468,12 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ int queue = SEQ_TO_QUEUE(sequence);
- if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
+ if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
+ (!iwl_mvm_is_dqa_supported(mvm) ||
+ (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
return;
if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@ -1273,10 +1483,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
rcu_read_lock();
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
- if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (!WARN_ON_ONCE(!mvmsta)) {
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
mvmsta->tid_data[tid].tx_time =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 53cdc5760..161b99efd 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -90,11 +90,17 @@ int iwl_mvm_send_cmd(struct iwl_mvm *mvm, struct iwl_host_cmd *cmd)
* the mutex, this ensures we don't try to send two
* (or more) synchronous commands at a time.
*/
- if (!(cmd->flags & CMD_ASYNC))
+ if (!(cmd->flags & CMD_ASYNC)) {
lockdep_assert_held(&mvm->mutex);
+ if (!(cmd->flags & CMD_SEND_IN_IDLE))
+ iwl_mvm_ref(mvm, IWL_MVM_REF_SENDING_CMD);
+ }
ret = iwl_trans_send_cmd(mvm->trans, cmd);
+ if (!(cmd->flags & (CMD_ASYNC | CMD_SEND_IN_IDLE)))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SENDING_CMD);
+
/*
* If the caller wants the SKB, then don't hide any problems, the
* caller might access the response buffer which will be NULL if
@@ -217,14 +223,14 @@ static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
};
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
int idx;
int band_offset = 0;
/* Legacy rate format, search for match in table */
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
if (fw_rate_idx_to_plcp[idx] == rate)
@@ -491,98 +497,12 @@ static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | isr status reg\n", table.nic_isr_pref);
}
-static void iwl_mvm_dump_nic_error_log_old(struct iwl_mvm *mvm)
-{
- struct iwl_trans *trans = mvm->trans;
- struct iwl_error_event_table_v1 table;
- u32 base;
-
- base = mvm->error_event_table;
- if (mvm->cur_ucode == IWL_UCODE_INIT) {
- if (!base)
- base = mvm->fw->init_errlog_ptr;
- } else {
- if (!base)
- base = mvm->fw->inst_errlog_ptr;
- }
-
- if (base < 0x800000) {
- IWL_ERR(mvm,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base,
- (mvm->cur_ucode == IWL_UCODE_INIT)
- ? "Init" : "RT");
- return;
- }
-
- iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
-
- if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
- IWL_ERR(trans, "Start IWL Error Log Dump:\n");
- IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
- mvm->status, table.valid);
- }
-
- /* Do not change this output - scripts rely on it */
-
- IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
-
- trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
- table.data1, table.data2, table.data3,
- table.blink2, table.ilink1, table.ilink2,
- table.bcon_time, table.gp1, table.gp2,
- table.gp3, table.ucode_ver, 0,
- table.hw_ver, table.brd_ver);
- IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
- desc_lookup(table.error_id));
- IWL_ERR(mvm, "0x%08X | uPc\n", table.pc);
- IWL_ERR(mvm, "0x%08X | branchlink1\n", table.blink1);
- IWL_ERR(mvm, "0x%08X | branchlink2\n", table.blink2);
- IWL_ERR(mvm, "0x%08X | interruptlink1\n", table.ilink1);
- IWL_ERR(mvm, "0x%08X | interruptlink2\n", table.ilink2);
- IWL_ERR(mvm, "0x%08X | data1\n", table.data1);
- IWL_ERR(mvm, "0x%08X | data2\n", table.data2);
- IWL_ERR(mvm, "0x%08X | data3\n", table.data3);
- IWL_ERR(mvm, "0x%08X | beacon time\n", table.bcon_time);
- IWL_ERR(mvm, "0x%08X | tsf low\n", table.tsf_low);
- IWL_ERR(mvm, "0x%08X | tsf hi\n", table.tsf_hi);
- IWL_ERR(mvm, "0x%08X | time gp1\n", table.gp1);
- IWL_ERR(mvm, "0x%08X | time gp2\n", table.gp2);
- IWL_ERR(mvm, "0x%08X | time gp3\n", table.gp3);
- IWL_ERR(mvm, "0x%08X | uCode version\n", table.ucode_ver);
- IWL_ERR(mvm, "0x%08X | hw version\n", table.hw_ver);
- IWL_ERR(mvm, "0x%08X | board version\n", table.brd_ver);
- IWL_ERR(mvm, "0x%08X | hcmd\n", table.hcmd);
- IWL_ERR(mvm, "0x%08X | isr0\n", table.isr0);
- IWL_ERR(mvm, "0x%08X | isr1\n", table.isr1);
- IWL_ERR(mvm, "0x%08X | isr2\n", table.isr2);
- IWL_ERR(mvm, "0x%08X | isr3\n", table.isr3);
- IWL_ERR(mvm, "0x%08X | isr4\n", table.isr4);
- IWL_ERR(mvm, "0x%08X | isr_pref\n", table.isr_pref);
- IWL_ERR(mvm, "0x%08X | wait_event\n", table.wait_event);
- IWL_ERR(mvm, "0x%08X | l2p_control\n", table.l2p_control);
- IWL_ERR(mvm, "0x%08X | l2p_duration\n", table.l2p_duration);
- IWL_ERR(mvm, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
- IWL_ERR(mvm, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
- IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
- IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
- IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
-
- if (mvm->support_umac_log)
- iwl_mvm_dump_umac_error_log(mvm);
-}
-
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
struct iwl_error_event_table table;
u32 base;
- if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_NEW_VERSION)) {
- iwl_mvm_dump_nic_error_log_old(mvm);
- return;
- }
-
base = mvm->error_event_table;
if (mvm->cur_ucode == IWL_UCODE_INIT) {
if (!base)
@@ -667,12 +587,45 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 minq, u8 maxq)
for (i = minq; i <= maxq; i++)
if (mvm->queue_info[i].hw_queue_refcount == 0 &&
- !mvm->queue_info[i].setup_reserved)
+ mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
return i;
return -ENOSPC;
}
+int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo, int sta_id,
+ int tid, int frame_limit, u16 ssn)
+{
+ struct iwl_scd_txq_cfg_cmd cmd = {
+ .scd_queue = queue,
+ .enable = 1,
+ .window = frame_limit,
+ .sta_id = sta_id,
+ .ssn = cpu_to_le16(ssn),
+ .tx_fifo = fifo,
+ .aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+ queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
+ .tid = tid,
+ };
+ int ret;
+
+ spin_lock_bh(&mvm->queue_info_lock);
+ if (WARN(mvm->queue_info[queue].hw_queue_refcount == 0,
+ "Trying to reconfig unallocated queue %d\n", queue)) {
+ spin_unlock_bh(&mvm->queue_info_lock);
+ return -ENXIO;
+ }
+ spin_unlock_bh(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+ WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
+ queue, fifo, ret);
+
+ return ret;
+}
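The only caller added by this patch is the window-shrink path in iwl_mvm_sta_tx_agg_oper() above, which pairs the reconfiguration with a drain of the queue first:

/* Call pattern from iwl_mvm_sta_tx_agg_oper(): drain, then reconfig */
ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
if (!ret)
	ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, mvmsta->sta_id,
				   tid, buf_size, ssn);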
+
void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg,
unsigned int wdg_timeout)
@@ -694,6 +647,8 @@ void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].hw_queue_refcount++;
if (mvm->queue_info[queue].hw_queue_refcount > 1)
enable_queue = false;
+ else
+ mvm->queue_info[queue].ra_sta_id = cfg->sta_id;
mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid);
IWL_DEBUG_TX_QUEUES(mvm,
@@ -766,6 +721,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
mvm->queue_info[queue].hw_queue_refcount--;
cmd.enable = mvm->queue_info[queue].hw_queue_refcount ? 1 : 0;
+ if (!cmd.enable)
+ mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;
IWL_DEBUG_TX_QUEUES(mvm,
"Disabling TXQ #%d refcount=%d (mac80211 map:0x%x)\n",
@@ -779,6 +736,8 @@ void iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue,
return;
}
+ cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+
/* Make sure queue info is correct even though we overwrite it */
WARN(mvm->queue_info[queue].hw_queue_refcount ||
mvm->queue_info[queue].tid_bitmap ||
@@ -1079,3 +1038,74 @@ void iwl_mvm_connection_loss(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
out:
ieee80211_connection_loss(vif);
}
+
+int iwl_mvm_send_lqm_cmd(struct ieee80211_vif *vif,
+ enum iwl_lqm_cmd_operatrions operation,
+ u32 duration, u32 timeout)
+{
+ struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_link_qual_msrmnt_cmd cmd = {
+ .cmd_operation = cpu_to_le32(operation),
+ .mac_id = cpu_to_le32(mvm_vif->id),
+ .measurement_time = cpu_to_le32(duration),
+ .timeout = cpu_to_le32(timeout),
+ };
+ u32 cmdid =
+ iwl_cmd_id(LINK_QUALITY_MEASUREMENT_CMD, MAC_CONF_GROUP, 0);
+ int ret;
+
+ if (!fw_has_capa(&mvm_vif->mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LQM_SUPPORT))
+ return -EOPNOTSUPP;
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return -EINVAL;
+
+ switch (operation) {
+ case LQM_CMD_OPERATION_START_MEASUREMENT:
+ if (iwl_mvm_lqm_active(mvm_vif->mvm))
+ return -EBUSY;
+ if (!vif->bss_conf.assoc)
+ return -EINVAL;
+ mvm_vif->lqm_active = true;
+ break;
+ case LQM_CMD_OPERATION_STOP_MEASUREMENT:
+ if (!iwl_mvm_lqm_active(mvm_vif->mvm))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm_vif->mvm, cmdid, 0, sizeof(cmd),
+ &cmd);
+
+ /* command failed - roll back lqm_active state */
+ if (ret) {
+ mvm_vif->lqm_active =
+ operation == LQM_CMD_OPERATION_STOP_MEASUREMENT;
+ }
+
+ return ret;
+}
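A hypothetical caller sketch; the wrapper name and the duration/timeout values are illustrative only, in whatever units LINK_QUALITY_MEASUREMENT_CMD expects:

/* Hypothetical usage: kick off one measurement on a station vif.
 * Returns -EBUSY if a measurement is already active on some vif,
 * -EINVAL for a wrong vif type or a non-associated vif. */
static int iwl_mvm_start_lqm_sketch(struct ieee80211_vif *vif)
{
	return iwl_mvm_send_lqm_cmd(vif, LQM_CMD_OPERATION_START_MEASUREMENT,
				    100, 200);
}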
+
+static void iwl_mvm_lqm_active_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvm_vif = iwl_mvm_vif_from_mac80211(vif);
+ bool *lqm_active = _data;
+
+ *lqm_active = *lqm_active || mvm_vif->lqm_active;
+}
+
+bool iwl_mvm_lqm_active(struct iwl_mvm *mvm)
+{
+ bool ret = false;
+
+ lockdep_assert_held(&mvm->mutex);
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_lqm_active_iterator, &ret);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 79d7cd7d4..a588b05e3 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -493,17 +493,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
/* 9000 Series */
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl5165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
@@ -593,6 +596,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
+ const struct iwl_cfg *cfg_9260lc __maybe_unused = NULL;
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
int ret;
@@ -620,6 +624,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cfg = cfg_7265d;
iwl_trans->cfg = cfg_7265d;
}
+
+ if (iwl_trans->cfg->rf_id) {
+ if (cfg == &iwl9260_2ac_cfg)
+ cfg_9260lc = &iwl9260lc_2ac_cfg;
+ if (cfg_9260lc && iwl_trans->hw_rf_id == CSR_HW_RF_ID_TYPE_LC) {
+ cfg = cfg_9260lc;
+ iwl_trans->cfg = cfg_9260lc;
+ }
+ }
#endif
pci_set_drvdata(pdev, iwl_trans);
@@ -661,10 +674,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* The PCI device starts with a reference taken and we are
* supposed to release it here. But to simplify the
* interaction with the opmode, we don't do it now, but let
- * the opmode release it when it's ready. To account for this
- * reference, we start with ref_count set to 1.
+ * the opmode release it when it's ready.
*/
- trans_pcie->ref_count = 1;
return 0;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index dadafbdef..de6974f9c 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -348,7 +348,7 @@ struct iwl_tso_hdr_page {
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
- struct iwl_rx_mem_buffer *global_table[MQ_RX_TABLE_SIZE];
+ struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
struct iwl_trans *trans;
struct iwl_drv *drv;
@@ -403,10 +403,6 @@ struct iwl_trans_pcie {
bool cmd_hold_nic_awake;
bool ref_cmd_in_flight;
- /* protect ref counter */
- spinlock_t ref_lock;
- u32 ref_count;
-
dma_addr_t fw_mon_phys;
struct page *fw_mon_page;
u32 fw_mon_size;
@@ -485,9 +481,6 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
-void iwl_trans_pcie_ref(struct iwl_trans *trans);
-void iwl_trans_pcie_unref(struct iwl_trans *trans);
-
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 4be3c35af..0a4a3c502 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -161,10 +161,11 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
return cpu_to_le32((u32)(dma_addr >> 8));
}
-static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
+static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs,
+ u64 val)
{
- iwl_write_prph(trans, ofs, val & 0xffffffff);
- iwl_write_prph(trans, ofs + 4, val >> 32);
+ iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
+ iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
}
/*
@@ -208,10 +209,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
rxq->write_actual = round_down(rxq->write, 8);
if (trans->cfg->mq_rx_supported)
- iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
- rxq->write_actual);
- else
- iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
+ iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
+ rxq->write_actual);
+ /*
+	 * Write to the FH_RSCSR_CHNL0_WPTR register even in MQ mode, as a
+	 * workaround for a hardware shadow-registers bug: writing to
+	 * RFH_Q_FRBDCB_WIDX alone will not wake the NIC.
+ */
+ iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
@@ -694,6 +699,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size;
+ unsigned long flags;
const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
switch (trans_pcie->rx_buf_size) {
@@ -711,23 +717,26 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
}
+ if (!iwl_trans_grab_nic_access(trans, &flags))
+ return;
+
/* Stop Rx DMA */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
/* reset and flush pointers */
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+ iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
/* Reset driver's Rx queue write index */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+ iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
/* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
+ iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ (u32)(rxq->bd_dma >> 8));
/* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
+ iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ rxq->rb_stts_dma >> 4);
/* Enable Rx DMA
* FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
@@ -737,13 +746,15 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
* RB timeout 0x10
* 256 RBDs
*/
- iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- rb_size|
- (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+ iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+ FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ rb_size |
+ (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ iwl_trans_release_nic_access(trans, &flags);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@@ -757,6 +768,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 rb_size, enabled = 0;
+ unsigned long flags;
int i;
switch (trans_pcie->rx_buf_size) {
@@ -774,25 +786,31 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
}
+ if (!iwl_trans_grab_nic_access(trans, &flags))
+ return;
+
/* Stop Rx DMA */
- iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
+	/* disable free and used rx queue operation */
- iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
+ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
for (i = 0; i < trans->num_rx_queues; i++) {
/* Tell device where to find RBD free table in DRAM */
- iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
- (u64)(trans_pcie->rxq[i].bd_dma));
+ iwl_pcie_write_prph_64_no_grab(trans,
+ RFH_Q_FRBDCB_BA_LSB(i),
+ trans_pcie->rxq[i].bd_dma);
/* Tell device where to find RBD used table in DRAM */
- iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
- (u64)(trans_pcie->rxq[i].used_bd_dma));
+ iwl_pcie_write_prph_64_no_grab(trans,
+ RFH_Q_URBDCB_BA_LSB(i),
+ trans_pcie->rxq[i].used_bd_dma);
/* Tell device where in DRAM to update its Rx status */
- iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
- trans_pcie->rxq[i].rb_stts_dma);
+ iwl_pcie_write_prph_64_no_grab(trans,
+ RFH_Q_URBD_STTS_WPTR_LSB(i),
+ trans_pcie->rxq[i].rb_stts_dma);
 		/* Reset device index tables */
- iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
- iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
- iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+ iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
enabled |= BIT(i) | BIT(i + 16);
}
@@ -808,23 +826,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
* Drop frames that exceed RB size
* 512 RBDs
*/
- iwl_write_prph(trans, RFH_RXF_DMA_CFG,
- RFH_DMA_EN_ENABLE_VAL |
- rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
- RFH_RXF_DMA_MIN_RB_4_8 |
- RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
- RFH_RXF_DMA_RBDCB_SIZE_512);
+ iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
+ RFH_DMA_EN_ENABLE_VAL |
+ rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
+ RFH_RXF_DMA_MIN_RB_4_8 |
+ RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
+ RFH_RXF_DMA_RBDCB_SIZE_512);
/*
* Activate DMA snooping.
* Set RX DMA chunk size to 64B
* Default queue is 0
*/
- iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
- (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
- RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+ iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
+ (DEFAULT_RXQ_NUM <<
+ RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
+ RFH_GEN_CFG_SERVICE_DMA_SNOOP);
/* Enable the relevant rx queues */
- iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+ iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+
+ iwl_trans_release_nic_access(trans, &flags);
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@@ -908,6 +929,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
allocator_pool_size = trans->num_rx_queues *
(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
num_alloc = queue_size + allocator_pool_size;
+ BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
+ ARRAY_SIZE(trans_pcie->rx_pool));
for (i = 0; i < num_alloc; i++) {
struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
@@ -1292,7 +1315,7 @@ static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
* write 1 clear (W1C) register, meaning that it's being clear
* by writing 1 to the bit.
*/
- iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+ iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
}
/*
@@ -1805,19 +1828,19 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
struct msix_entry *entry = dev_id;
struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
struct iwl_trans *trans = trans_pcie->trans;
- struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats;
+ struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta_fh, inta_hw;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
spin_lock(&trans_pcie->irq_lock);
- inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
- inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+ inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
+ inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
/*
 	 * Clear the cause registers to avoid handling the same cause again.
*/
- iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
- iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+ iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+ iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
spin_unlock(&trans_pcie->irq_lock);
if (unlikely(!(inta_fh | inta_hw))) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index b2b79354d..f603d7830 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -269,9 +269,8 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
/* Configure analog phase-lock-loop before activating to D0A */
- if (trans->cfg->base_params->pll_cfg_val)
- iwl_set_bit(trans, CSR_ANA_PLL_CFG,
- trans->cfg->base_params->pll_cfg_val);
+ if (trans->cfg->base_params->pll_cfg)
+ iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
/*
* Set "initialization complete" bit to move adapter from
@@ -361,8 +360,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
+ usleep_range(1000, 2000);
/*
* Set "initialization complete" bit to move adapter from
@@ -408,8 +406,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
* SHRD_HW_RST). Turn MAC off before proceeding.
*/
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
+ usleep_range(1000, 2000);
/* Enable LP XTAL by indirect access through CSR */
apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
@@ -506,8 +503,7 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
+ usleep_range(1000, 2000);
/*
* Clear "initialization complete" bit to move adapter from
@@ -586,7 +582,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
- msleep(1);
+ usleep_range(1000, 2000);
for (iter = 0; iter < 10; iter++) {
/* If HW is not ready, prepare the conditions to check again */
@@ -1074,7 +1070,7 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
- udelay(20);
+ usleep_range(1000, 2000);
/*
* Upon stop, the APM issues an interrupt if HW RF kill is set.
@@ -1321,6 +1317,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
* after this call.
*/
iwl_pcie_reset_ict(trans);
+ iwl_enable_interrupts(trans);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
@@ -1434,7 +1431,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
int ret, i;
if (trans->cfg->mq_rx_supported) {
- max_vector = min_t(u32, (num_possible_cpus() + 1),
+ max_vector = min_t(u32, (num_possible_cpus() + 2),
IWL_MAX_RX_HW_QUEUES);
for (i = 0; i < max_vector; i++)
trans_pcie->msix_entries[i].entry = i;
@@ -1465,7 +1462,7 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
ret = pci_enable_msi(pdev);
if (ret) {
- dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
+ dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
/* enable rfkill interrupt: hw bug w/a */
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -1499,8 +1496,8 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
IWL_ERR(trans_pcie->trans,
"Error allocating IRQ %d\n", i);
for (j = 0; j < i; j++)
- free_irq(trans_pcie->msix_entries[i].vector,
- &trans_pcie->msix_entries[i]);
+ free_irq(trans_pcie->msix_entries[j].vector,
+ &trans_pcie->msix_entries[j]);
pci_disable_msix(pdev);
return ret;
}
@@ -1525,8 +1522,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
/* Reset the entire device */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- usleep_range(10, 15);
+ usleep_range(1000, 2000);
iwl_pcie_apm_init(trans);
@@ -1694,6 +1690,7 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
}
free_percpu(trans_pcie->tso_hdr_page);
+ mutex_destroy(&trans_pcie->mutex);
iwl_trans_free(trans);
}
@@ -1948,7 +1945,7 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
"WR pointer moved while flushing %d -> %d\n",
wr_ptr, write_ptr))
return -ETIMEDOUT;
- msleep(1);
+ usleep_range(1000, 2000);
}
if (q->read_ptr != q->write_ptr) {
@@ -2011,41 +2008,35 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
-void iwl_trans_pcie_ref(struct iwl_trans *trans)
+static void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
if (iwlwifi_mod_params.d0i3_disable)
return;
- spin_lock_irqsave(&trans_pcie->ref_lock, flags);
- IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
- trans_pcie->ref_count++;
pm_runtime_get(&trans_pcie->pci_dev->dev);
- spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+
+#ifdef CONFIG_PM
+ IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+ atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
}
-void iwl_trans_pcie_unref(struct iwl_trans *trans)
+static void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
if (iwlwifi_mod_params.d0i3_disable)
return;
- spin_lock_irqsave(&trans_pcie->ref_lock, flags);
- IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
- if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
- spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
- return;
- }
- trans_pcie->ref_count--;
-
pm_runtime_mark_last_busy(&trans_pcie->pci_dev->dev);
pm_runtime_put_autosuspend(&trans_pcie->pci_dev->dev);
- spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
+#ifdef CONFIG_PM
+ IWL_DEBUG_RPM(trans, "runtime usage count: %d\n",
+ atomic_read(&trans_pcie->pci_dev->dev.power.usage_count));
+#endif /* CONFIG_PM */
}
static const char *get_csr_string(int cmd)
@@ -2793,7 +2784,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans_pcie->trans = trans;
spin_lock_init(&trans_pcie->irq_lock);
spin_lock_init(&trans_pcie->reg_lock);
- spin_lock_init(&trans_pcie->ref_lock);
mutex_init(&trans_pcie->mutex);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
@@ -2912,6 +2902,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
}
+ trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
+
iwl_pcie_set_interrupt_capa(pdev, trans);
trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
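A note on the delay conversions in trans.c above: every post-reset wait
(udelay(10), udelay(20), usleep_range(10, 15), msleep(1)) is widened to
usleep_range(1000, 2000), which sleeps instead of busy-waiting for the
whole interval. A minimal sketch of the resulting pattern; the helper
name here is hypothetical, the CSR symbols are the driver's own:

	#include <linux/delay.h>

	/* hypothetical helper showing the settle-after-reset idiom */
	static void example_sw_reset(struct iwl_trans *trans)
	{
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

		/* ~1-2 ms wait: spinning in udelay() for this long
		 * would waste the CPU, so sleep instead */
		usleep_range(1000, 2000);
	}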
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 16ad820ca..d6beac9af 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -32,6 +32,7 @@
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>
@@ -596,6 +597,28 @@ static void iwl_pcie_free_tso_page(struct sk_buff *skb)
}
}
+static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ lockdep_assert_held(&trans_pcie->reg_lock);
+
+ if (trans_pcie->ref_cmd_in_flight) {
+ trans_pcie->ref_cmd_in_flight = false;
+ IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
+ iwl_trans_unref(trans);
+ }
+
+ if (!trans->cfg->base_params->apmg_wake_up_wa)
+ return;
+ if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+ return;
+
+ trans_pcie->cmd_hold_nic_awake = false;
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
/*
* iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
*/
@@ -620,6 +643,20 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
}
iwl_pcie_txq_free_tfd(trans, txq);
q->read_ptr = iwl_queue_inc_wrap(q->read_ptr);
+
+ if (q->read_ptr == q->write_ptr) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ if (txq_id != trans_pcie->cmd_queue) {
+ IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
+ q->id);
+ iwl_trans_unref(trans);
+ } else {
+ iwl_pcie_clear_cmd_in_flight(trans);
+ }
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ }
}
txq->active = false;
@@ -1098,7 +1135,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
if (q->read_ptr == q->write_ptr) {
IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", q->id);
- iwl_trans_pcie_unref(trans);
+ iwl_trans_unref(trans);
}
out:
@@ -1117,7 +1154,7 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
!trans_pcie->ref_cmd_in_flight) {
trans_pcie->ref_cmd_in_flight = true;
IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
- iwl_trans_pcie_ref(trans);
+ iwl_trans_ref(trans);
}
/*
@@ -1148,29 +1185,6 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
return 0;
}
-static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- lockdep_assert_held(&trans_pcie->reg_lock);
-
- if (trans_pcie->ref_cmd_in_flight) {
- trans_pcie->ref_cmd_in_flight = false;
- IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
- iwl_trans_pcie_unref(trans);
- }
-
- if (trans->cfg->base_params->apmg_wake_up_wa) {
- if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
- return 0;
-
- trans_pcie->cmd_hold_nic_awake = false;
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- }
- return 0;
-}
-
/*
* iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
*
@@ -1786,6 +1800,16 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
iwl_get_cmd_string(trans, cmd->id));
+ if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
+ ret = wait_event_timeout(trans_pcie->d0i3_waitq,
+ pm_runtime_active(&trans_pcie->pci_dev->dev),
+ msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
+ if (!ret) {
+ IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
+ return -ETIMEDOUT;
+ }
+ }
+
cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
if (cmd_idx < 0) {
ret = cmd_idx;
@@ -2197,6 +2221,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
__le16 fc;
u8 hdr_len;
u16 wifi_seq;
+ bool amsdu;
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
@@ -2288,11 +2313,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
*/
len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
- tb1_len = ALIGN(len, 4);
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (tb1_len != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+ amsdu = ieee80211_is_data_qos(fc) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+ if (trans_pcie->sw_csum_tx || !amsdu) {
+ tb1_len = ALIGN(len, 4);
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (tb1_len != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+ } else {
+ tb1_len = len;
+ }
/* The first TB points to the scratchbuf data - min_copy bytes */
memcpy(&txq->scratchbufs[q->write_ptr], &dev_cmd->hdr,
@@ -2310,8 +2342,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
goto out_err;
iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
- if (ieee80211_is_data_qos(fc) &&
- (*ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_A_MSDU_PRESENT)) {
+ if (amsdu) {
if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
out_meta, dev_cmd,
tb1_len)))
@@ -2342,7 +2373,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->frozen_expiry_remainder = txq->wd_timeout;
}
IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
- iwl_trans_pcie_ref(trans);
+ iwl_trans_ref(trans);
}
/* Tell device the write index *just past* this latest filled TFD */
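The tb1_len logic in iwl_trans_pcie_tx() above encodes one rule: a QoS
data frame carrying an A-MSDU must not have its first TX buffer padded
to a dword boundary, because the A-MSDU subframe header already
provides the alignment. Condensed from the hunk (surrounding
declarations elided; sw_csum_tx frames keep the padded path):

	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	if (trans_pcie->sw_csum_tx || !amsdu) {
		tb1_len = ALIGN(len, 4);
		/* tell the NIC to skip the 2-byte pad after the MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
	} else {
		tb1_len = len;	/* subframe header keeps payload aligned */
	}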
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 515aa3f99..a8a9bd8e1 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -1794,7 +1794,7 @@ static int prism2_transmit(struct net_device *dev, int idx)
netif_wake_queue(dev);
return -1;
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* Since we did not wait for command completion, the card continues
* to process on the background and we will finish handling when
diff --git a/drivers/net/wireless/intersil/orinoco/cfg.c b/drivers/net/wireless/intersil/orinoco/cfg.c
index 0f6ea316e..7aa47069a 100644
--- a/drivers/net/wireless/intersil/orinoco/cfg.c
+++ b/drivers/net/wireless/intersil/orinoco/cfg.c
@@ -60,14 +60,14 @@ int orinoco_wiphy_register(struct wiphy *wiphy)
if (priv->channel_mask & (1 << i)) {
priv->channels[i].center_freq =
ieee80211_channel_to_frequency(i + 1,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
channels++;
}
}
priv->band.channels = priv->channels;
priv->band.n_channels = channels;
- wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
i = 0;
@@ -175,7 +175,7 @@ static int orinoco_set_monitor_channel(struct wiphy *wiphy,
if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT)
return -EINVAL;
- if (chandef->chan->band != IEEE80211_BAND_2GHZ)
+ if (chandef->chan->band != NL80211_BAND_2GHZ)
return -EINVAL;
channel = ieee80211_frequency_to_channel(chandef->chan->center_freq);
diff --git a/drivers/net/wireless/intersil/orinoco/hw.c b/drivers/net/wireless/intersil/orinoco/hw.c
index e27e32851..61af5a28f 100644
--- a/drivers/net/wireless/intersil/orinoco/hw.c
+++ b/drivers/net/wireless/intersil/orinoco/hw.c
@@ -1193,7 +1193,7 @@ int orinoco_hw_get_freq(struct orinoco_private *priv)
goto out;
}
- freq = ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+ freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
out:
orinoco_unlock(priv, &flags);
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 7b5c55432..7afe2004e 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -1794,7 +1794,7 @@ void orinoco_reset(struct work_struct *work)
printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n",
dev->name, err);
} else
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
orinoco_unlock_irq(priv);
diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
index 977298ad9..892174318 100644
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -1275,7 +1275,7 @@ static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev)
goto busy;
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
stats->tx_bytes += skb->len;
goto ok;
diff --git a/drivers/net/wireless/intersil/orinoco/scan.c b/drivers/net/wireless/intersil/orinoco/scan.c
index 2c66166ad..d0ceb06c7 100644
--- a/drivers/net/wireless/intersil/orinoco/scan.c
+++ b/drivers/net/wireless/intersil/orinoco/scan.c
@@ -111,7 +111,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
}
freq = ieee80211_channel_to_frequency(
- le16_to_cpu(bss->a.channel), IEEE80211_BAND_2GHZ);
+ le16_to_cpu(bss->a.channel), NL80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
if (!channel) {
printk(KERN_DEBUG "Invalid channel designation %04X(%04X)",
@@ -148,7 +148,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
ie_len = len - sizeof(*bss);
ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len);
chan = ie ? ie[2] : 0;
- freq = ieee80211_channel_to_frequency(chan, IEEE80211_BAND_2GHZ);
+ freq = ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ);
channel = ieee80211_get_channel(wiphy, freq);
timestamp = le64_to_cpu(bss->timestamp);
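Most of the remaining churn in this patch is one mechanical rename,
summarized once here so the later hunks can be read quickly:

	/* band enum moved from mac80211's namespace to nl80211's:
	 *
	 *   enum ieee80211_band   ->  enum nl80211_band
	 *   IEEE80211_BAND_2GHZ   ->  NL80211_BAND_2GHZ
	 *   IEEE80211_BAND_5GHZ   ->  NL80211_BAND_5GHZ
	 *   IEEE80211_NUM_BANDS   ->  NUM_NL80211_BANDS
	 */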
diff --git a/drivers/net/wireless/intersil/p54/eeprom.c b/drivers/net/wireless/intersil/p54/eeprom.c
index 2fe713eda..d4c73d393 100644
--- a/drivers/net/wireless/intersil/p54/eeprom.c
+++ b/drivers/net/wireless/intersil/p54/eeprom.c
@@ -76,14 +76,14 @@ struct p54_channel_entry {
u16 data;
int index;
int max_power;
- enum ieee80211_band band;
+ enum nl80211_band band;
};
struct p54_channel_list {
struct p54_channel_entry *channels;
size_t entries;
size_t max_entries;
- size_t band_channel_num[IEEE80211_NUM_BANDS];
+ size_t band_channel_num[NUM_NL80211_BANDS];
};
static int p54_get_band_from_freq(u16 freq)
@@ -91,10 +91,10 @@ static int p54_get_band_from_freq(u16 freq)
/* FIXME: sync these values with the 802.11 spec */
if ((freq >= 2412) && (freq <= 2484))
- return IEEE80211_BAND_2GHZ;
+ return NL80211_BAND_2GHZ;
if ((freq >= 4920) && (freq <= 5825))
- return IEEE80211_BAND_5GHZ;
+ return NL80211_BAND_5GHZ;
return -1;
}
@@ -124,16 +124,16 @@ static int p54_compare_rssichan(const void *_a,
static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
struct ieee80211_supported_band *band_entry,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
/* TODO: generate rate array dynamically */
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
band_entry->bitrates = p54_bgrates;
band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates);
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
band_entry->bitrates = p54_arates;
band_entry->n_bitrates = ARRAY_SIZE(p54_arates);
break;
@@ -147,7 +147,7 @@ static int p54_fill_band_bitrates(struct ieee80211_hw *dev,
static int p54_generate_band(struct ieee80211_hw *dev,
struct p54_channel_list *list,
unsigned int *chan_num,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
struct p54_common *priv = dev->priv;
struct ieee80211_supported_band *tmp, *old;
@@ -206,7 +206,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
if (j == 0) {
wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n",
- (band == IEEE80211_BAND_2GHZ) ? 2 : 5);
+ (band == NL80211_BAND_2GHZ) ? 2 : 5);
ret = -ENODATA;
goto err_out;
@@ -396,7 +396,7 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
p54_compare_channels, NULL);
k = 0;
- for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
+ for (i = 0, j = 0; i < NUM_NL80211_BANDS; i++) {
if (p54_generate_band(dev, list, &k, i) == 0)
j++;
}
@@ -573,10 +573,10 @@ static int p54_parse_rssical(struct ieee80211_hw *dev,
for (i = 0; i < entries; i++) {
u16 freq = 0;
switch (i) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
freq = 2437;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
freq = 5240;
break;
}
@@ -902,11 +902,11 @@ good_eeprom:
if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW)
p54_init_xbow_synth(priv);
if (!(synth & PDR_SYNTH_24_GHZ_DISABLED))
- dev->wiphy->bands[IEEE80211_BAND_2GHZ] =
- priv->band_table[IEEE80211_BAND_2GHZ];
+ dev->wiphy->bands[NL80211_BAND_2GHZ] =
+ priv->band_table[NL80211_BAND_2GHZ];
if (!(synth & PDR_SYNTH_5_GHZ_DISABLED))
- dev->wiphy->bands[IEEE80211_BAND_5GHZ] =
- priv->band_table[IEEE80211_BAND_5GHZ];
+ dev->wiphy->bands[NL80211_BAND_5GHZ] =
+ priv->band_table[NL80211_BAND_5GHZ];
if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED)
priv->rx_diversity_mask = 3;
if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED)
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index 7805864e7..d5a3bf91a 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -477,7 +477,7 @@ static void p54_bss_info_changed(struct ieee80211_hw *dev,
p54_set_edcf(priv);
}
if (changed & BSS_CHANGED_BASIC_RATES) {
- if (dev->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (dev->conf.chandef.chan->band == NL80211_BAND_5GHZ)
priv->basic_rate_mask = (info->basic_rates << 4);
else
priv->basic_rate_mask = info->basic_rates;
@@ -829,7 +829,7 @@ void p54_free_common(struct ieee80211_hw *dev)
struct p54_common *priv = dev->priv;
unsigned int i;
- for (i = 0; i < IEEE80211_NUM_BANDS; i++)
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
kfree(priv->band_table[i]);
kfree(priv->iq_autocal);
diff --git a/drivers/net/wireless/intersil/p54/p54.h b/drivers/net/wireless/intersil/p54/p54.h
index 40b401ed6..529939e61 100644
--- a/drivers/net/wireless/intersil/p54/p54.h
+++ b/drivers/net/wireless/intersil/p54/p54.h
@@ -223,7 +223,7 @@ struct p54_common {
struct p54_cal_database *curve_data;
struct p54_cal_database *output_limit;
struct p54_cal_database *rssi_db;
- struct ieee80211_supported_band *band_table[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band *band_table[NUM_NL80211_BANDS];
/* BBP/MAC state */
u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/wireless/intersil/p54/txrx.c b/drivers/net/wireless/intersil/p54/txrx.c
index 24e5ff9a9..1af7da0b3 100644
--- a/drivers/net/wireless/intersil/p54/txrx.c
+++ b/drivers/net/wireless/intersil/p54/txrx.c
@@ -353,7 +353,7 @@ static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb)
rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi);
if (hdr->rate & 0x10)
rx_status->flag |= RX_FLAG_SHORTPRE;
- if (priv->hw->conf.chandef.chan->band == IEEE80211_BAND_5GHZ)
+ if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
rx_status->rate_idx = (rate < 4) ? 0 : rate - 4;
else
rx_status->rate_idx = rate;
@@ -867,7 +867,7 @@ void p54_tx_80211(struct ieee80211_hw *dev,
for (i = 0; i < nrates && ridx < 8; i++) {
/* we register the rates in perfect order */
rate = info->control.rates[i].idx;
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate += 4;
/* store the count we actually calculated for TX status */
diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c
index 333c1a2f8..6700387ef 100644
--- a/drivers/net/wireless/intersil/prism54/isl_38xx.c
+++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
+#include <linux/ktime.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -113,7 +114,7 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
#if VERBOSE > SHOW_ERROR_MESSAGES
u32 counter = 0;
- struct timeval current_time;
+ struct timespec64 current_ts64;
DEBUG(SHOW_FUNCTION_CALLS, "isl38xx trigger device\n");
#endif
@@ -121,22 +122,22 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
if (asleep) {
/* device is in powersave, trigger the device for wakeup */
#if VERBOSE > SHOW_ERROR_MESSAGES
- do_gettimeofday(&current_time);
- DEBUG(SHOW_TRACING, "%08li.%08li Device wakeup triggered\n",
- current_time.tv_sec, (long)current_time.tv_usec);
+ ktime_get_real_ts64(&current_ts64);
+ DEBUG(SHOW_TRACING, "%lld.%09ld Device wakeup triggered\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec);
- DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
- current_time.tv_sec, (long)current_time.tv_usec,
+ DEBUG(SHOW_TRACING, "%lld.%09ld Device register read %08x\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
readl(device_base + ISL38XX_CTRL_STAT_REG));
#endif
reg = readl(device_base + ISL38XX_INT_IDENT_REG);
if (reg == 0xabadface) {
#if VERBOSE > SHOW_ERROR_MESSAGES
- do_gettimeofday(&current_time);
+ ktime_get_real_ts64(&current_ts64);
DEBUG(SHOW_TRACING,
- "%08li.%08li Device register abadface\n",
- current_time.tv_sec, (long)current_time.tv_usec);
+ "%lld.%09ld Device register abadface\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec);
#endif
/* read the Device Status Register until Sleepmode bit is set */
while (reg = readl(device_base + ISL38XX_CTRL_STAT_REG),
@@ -149,13 +150,13 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
#if VERBOSE > SHOW_ERROR_MESSAGES
DEBUG(SHOW_TRACING,
- "%08li.%08li Device register read %08x\n",
- current_time.tv_sec, (long)current_time.tv_usec,
+ "%lld.%09ld Device register read %08x\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
readl(device_base + ISL38XX_CTRL_STAT_REG));
- do_gettimeofday(&current_time);
+ ktime_get_real_ts64(&current_ts64);
DEBUG(SHOW_TRACING,
- "%08li.%08li Device asleep counter %i\n",
- current_time.tv_sec, (long)current_time.tv_usec,
+ "%lld.%09ld Device asleep counter %i\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec,
counter);
#endif
}
@@ -168,9 +169,9 @@ isl38xx_trigger_device(int asleep, void __iomem *device_base)
/* perform another read on the Device Status Register */
reg = readl(device_base + ISL38XX_CTRL_STAT_REG);
- do_gettimeofday(&current_time);
- DEBUG(SHOW_TRACING, "%08li.%08li Device register read %08x\n",
- current_time.tv_sec, (long)current_time.tv_usec, reg);
+ ktime_get_real_ts64(&current_ts64);
+ DEBUG(SHOW_TRACING, "%lld.%09ld Device register read %08x\n",
+ (s64)current_ts64.tv_sec, current_ts64.tv_nsec, reg);
#endif
} else {
/* device is (still) awake */
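The isl_38xx.c hunks swap the 32-bit-unsafe struct timeval plus
do_gettimeofday() pair for the y2038-safe struct timespec64 plus
ktime_get_real_ts64(). A self-contained sketch of the replacement
pattern (the driver's DEBUG macro is replaced by plain printk here):

	#include <linux/ktime.h>
	#include <linux/printk.h>

	static void example_timestamp(void)
	{
		struct timespec64 ts;

		ktime_get_real_ts64(&ts);
		/* seconds widen to 64 bit; nanoseconds take %09ld */
		printk(KERN_DEBUG "%lld.%09ld event\n",
		       (s64)ts.tv_sec, ts.tv_nsec);
	}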
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 06664baa4..4dd5adcdd 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -255,14 +255,14 @@ static struct class *hwsim_class;
static struct net_device *hwsim_mon; /* global monitor netdev */
#define CHAN2G(_freq) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_freq), \
.max_power = 20, \
}
#define CHAN5G(_freq) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
.hw_value = (_freq), \
.max_power = 20, \
@@ -479,7 +479,7 @@ struct mac80211_hwsim_data {
struct list_head list;
struct ieee80211_hw *hw;
struct device *dev;
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)];
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
@@ -1030,7 +1030,7 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw,
data->pending_cookie++;
cookie = data->pending_cookie;
info->rate_driver_data[0] = (void *)cookie;
- if (nla_put_u64(skb, HWSIM_ATTR_COOKIE, cookie))
+ if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD))
goto nla_put_failure;
genlmsg_end(skb, msg_head);
@@ -1909,6 +1909,7 @@ static void hw_scan_work(struct work_struct *work)
/* send probes */
for (i = 0; i < req->n_ssids; i++) {
struct sk_buff *probe;
+ struct ieee80211_mgmt *mgmt;
probe = ieee80211_probereq_get(hwsim->hw,
hwsim->scan_addr,
@@ -1918,6 +1919,10 @@ static void hw_scan_work(struct work_struct *work)
if (!probe)
continue;
+ mgmt = (struct ieee80211_mgmt *) probe->data;
+ memcpy(mgmt->da, req->bssid, ETH_ALEN);
+ memcpy(mgmt->bssid, req->bssid, ETH_ALEN);
+
if (req->ie_len)
memcpy(skb_put(probe, req->ie_len), req->ie,
req->ie_len);
@@ -2342,7 +2347,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
u8 addr[ETH_ALEN];
struct mac80211_hwsim_data *data;
struct ieee80211_hw *hw;
- enum ieee80211_band band;
+ enum nl80211_band band;
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
int idx;
@@ -2471,16 +2476,16 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sizeof(hwsim_channels_5ghz));
memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates));
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband = &data->bands[band];
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
sband->channels = data->channels_2ghz;
sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz);
sband->bitrates = data->rates;
sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
sband->channels = data->channels_5ghz;
sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz);
sband->bitrates = data->rates + 4;
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 66e1c73bd..39f22467c 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -148,6 +148,7 @@ enum {
HWSIM_ATTR_RADIO_NAME,
HWSIM_ATTR_NO_VIF,
HWSIM_ATTR_FREQ,
+ HWSIM_ATTR_PAD,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
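HWSIM_ATTR_PAD exists because 64-bit netlink attributes must start on
an 8-byte boundary; nla_put_u64_64bit() therefore takes an explicit
pad attribute type, which is why the hwsim.c hunk moved away from
nla_put_u64(). The call pattern, as used above:

	/* returns non-zero (and we bail out) if the skb lacks tailroom */
	if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie,
			      HWSIM_ATTR_PAD))
		goto nla_put_failure;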
diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c
index 2eea76a34..776b44bfd 100644
--- a/drivers/net/wireless/marvell/libertas/cfg.c
+++ b/drivers/net/wireless/marvell/libertas/cfg.c
@@ -23,7 +23,7 @@
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -639,7 +639,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
if (chan_no != -1) {
struct wiphy *wiphy = priv->wdev->wiphy;
int freq = ieee80211_channel_to_frequency(chan_no,
- IEEE80211_BAND_2GHZ);
+ NL80211_BAND_2GHZ);
struct ieee80211_channel *channel =
ieee80211_get_channel(wiphy, freq);
@@ -1266,7 +1266,7 @@ _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
{
struct cfg80211_scan_request *creq = NULL;
int i, n_channels = ieee80211_get_num_supported_channels(wiphy);
- enum ieee80211_band band;
+ enum nl80211_band band;
creq = kzalloc(sizeof(*creq) + sizeof(struct cfg80211_ssid) +
n_channels * sizeof(void *),
@@ -1281,7 +1281,7 @@ _new_connect_scan_req(struct wiphy *wiphy, struct cfg80211_connect_params *sme)
/* Scan all available channels */
i = 0;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
int j;
if (!wiphy->bands[band])
@@ -2200,7 +2200,7 @@ int lbs_cfg_register(struct lbs_private *priv)
if (lbs_mesh_activated(priv))
wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MESH_POINT);
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
+ wdev->wiphy->bands[NL80211_BAND_2GHZ] = &lbs_band_2ghz;
/*
* We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index 4ddd0e5a6..301170ccc 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -743,7 +743,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv)
struct cmd_ds_802_11d_domain_info cmd;
struct mrvl_ie_domain_param_set *domain = &cmd.domain;
struct ieee80211_country_ie_triplet *t;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_channel *ch;
u8 num_triplet = 0;
u8 num_parsed_chan = 0;
@@ -777,7 +777,7 @@ int lbs_set_11d_domain_info(struct lbs_private *priv)
* etc.
*/
for (band = 0;
- (band < IEEE80211_NUM_BANDS) && (num_triplet < MAX_11D_TRIPLETS);
+ (band < NUM_NL80211_BANDS) && (num_triplet < MAX_11D_TRIPLETS);
band++) {
if (!bands[band])
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index a47f0acc0..0bf8916a0 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -570,7 +570,7 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
if (!(prxpd->status & cpu_to_le16(MRVDRV_RXPD_STATUS_OK)))
stats.flag |= RX_FLAG_FAILED_FCS_CRC;
stats.freq = priv->cur_freq;
- stats.band = IEEE80211_BAND_2GHZ;
+ stats.band = NL80211_BAND_2GHZ;
stats.signal = prxpd->snr;
priv->noise = prxpd->nf;
/* Marvell rate index has a hole at value 4 */
@@ -642,7 +642,7 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
priv->band.bitrates = priv->rates;
priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
priv->band.channels = priv->channels;
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 09578c6cd..a74cc43b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -59,7 +59,10 @@ static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
skb->len);
}
- ret = mwifiex_recv_packet(priv, rx_skb);
+ if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
+ ret = mwifiex_uap_recv_packet(priv, rx_skb);
+ else
+ ret = mwifiex_recv_packet(priv, rx_skb);
if (ret == -1)
mwifiex_dbg(priv->adapter, ERROR,
"Rx of A-MSDU failed");
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index bb7235e1b..ff948a922 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -474,7 +474,7 @@ int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy)
u8 no_of_parsed_chan = 0;
u8 first_chan = 0, next_chan = 0, max_pwr = 0;
u8 i, flag = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
@@ -1410,7 +1410,7 @@ mwifiex_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *dev,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
struct mwifiex_chan_stats *pchan_stats = priv->adapter->chan_stats;
- enum ieee80211_band band;
+ enum nl80211_band band;
mwifiex_dbg(priv->adapter, DUMP, "dump_survey idx=%d\n", idx);
@@ -1586,7 +1586,7 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
u16 bitmap_rates[MAX_BITMAP_RATES_SIZE];
- enum ieee80211_band band;
+ enum nl80211_band band;
struct mwifiex_adapter *adapter = priv->adapter;
if (!priv->media_connected) {
@@ -1600,11 +1600,11 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
memset(bitmap_rates, 0, sizeof(bitmap_rates));
/* Fill HR/DSSS rates. */
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
bitmap_rates[0] = mask->control[band].legacy & 0x000f;
/* Fill OFDM rates */
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
bitmap_rates[1] = (mask->control[band].legacy & 0x0ff0) >> 4;
else
bitmap_rates[1] = mask->control[band].legacy;
@@ -1771,7 +1771,7 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
} else {
struct ieee80211_sta_ht_cap *ht_info;
int rx_mcs_supp;
- enum ieee80211_band band;
+ enum nl80211_band band;
if ((tx_ant == 0x1 && rx_ant == 0x1)) {
adapter->user_dev_mcs_support = HT_STREAM_1X1;
@@ -1785,7 +1785,7 @@ mwifiex_cfg80211_set_antenna(struct wiphy *wiphy, u32 tx_ant, u32 rx_ant)
MWIFIEX_11AC_MCS_MAP_2X2;
}
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!adapter->wiphy->bands[band])
continue;
@@ -1997,7 +1997,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
struct cfg80211_bss *bss;
int ie_len;
u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
- enum ieee80211_band band;
+ enum nl80211_band band;
if (mwifiex_get_bss_info(priv, &bss_info))
return -1;
@@ -2271,7 +2271,7 @@ static int mwifiex_set_ibss_params(struct mwifiex_private *priv,
int index = 0, i;
u8 config_bands = 0;
- if (params->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (params->chandef.chan->band == NL80211_BAND_2GHZ) {
if (!params->basic_rates) {
config_bands = BAND_B | BAND_G;
} else {
@@ -2859,18 +2859,18 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mwifiex_init_priv_params(priv, dev);
priv->netdev = dev;
- mwifiex_setup_ht_caps(&wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap, priv);
+ mwifiex_setup_ht_caps(&wiphy->bands[NL80211_BAND_2GHZ]->ht_cap, priv);
if (adapter->is_hw_11ac_capable)
mwifiex_setup_vht_caps(
- &wiphy->bands[IEEE80211_BAND_2GHZ]->vht_cap, priv);
+ &wiphy->bands[NL80211_BAND_2GHZ]->vht_cap, priv);
if (adapter->config_bands & BAND_A)
mwifiex_setup_ht_caps(
- &wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap, priv);
+ &wiphy->bands[NL80211_BAND_5GHZ]->ht_cap, priv);
if ((adapter->config_bands & BAND_A) && adapter->is_hw_11ac_capable)
mwifiex_setup_vht_caps(
- &wiphy->bands[IEEE80211_BAND_5GHZ]->vht_cap, priv);
+ &wiphy->bands[NL80211_BAND_5GHZ]->vht_cap, priv);
dev_net_set(dev, wiphy_net(wiphy));
dev->ieee80211_ptr = &priv->wdev;
@@ -3272,8 +3272,11 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && priv->netdev)
+ if (priv && priv->netdev) {
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ if (netif_carrier_ok(priv->netdev))
+ netif_carrier_off(priv->netdev);
+ }
}
for (i = 0; i < retry_num; i++) {
@@ -3341,13 +3344,20 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
struct mwifiex_ds_wakeup_reason wakeup_reason;
struct cfg80211_wowlan_wakeup wakeup_report;
int i;
+ bool report_wakeup_reason = true;
for (i = 0; i < adapter->priv_num; i++) {
priv = adapter->priv[i];
- if (priv && priv->netdev)
+ if (priv && priv->netdev) {
+ if (!netif_carrier_ok(priv->netdev))
+ netif_carrier_on(priv->netdev);
mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
+ }
}
+ if (!wiphy->wowlan_config)
+ goto done;
+
priv = mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
mwifiex_get_wakeup_reason(priv, HostCmd_ACT_GEN_GET, MWIFIEX_SYNC_CMD,
&wakeup_reason);
@@ -3380,19 +3390,20 @@ static int mwifiex_cfg80211_resume(struct wiphy *wiphy)
if (wiphy->wowlan_config->n_patterns)
wakeup_report.pattern_idx = 1;
break;
- case CONTROL_FRAME_MATCHED:
- break;
- case MANAGEMENT_FRAME_MATCHED:
+ case GTK_REKEY_FAILURE:
+ if (wiphy->wowlan_config->gtk_rekey_failure)
+ wakeup_report.gtk_rekey_failure = true;
break;
default:
+ report_wakeup_reason = false;
break;
}
- if ((wakeup_reason.hs_wakeup_reason > 0) &&
- (wakeup_reason.hs_wakeup_reason <= 7))
+ if (report_wakeup_reason)
cfg80211_report_wowlan_wakeup(&priv->wdev, &wakeup_report,
GFP_KERNEL);
+done:
if (adapter->nd_info) {
for (i = 0 ; i < adapter->nd_info->n_matches ; i++)
kfree(adapter->nd_info->matches[i]);
@@ -3410,6 +3421,16 @@ static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
device_set_wakeup_enable(adapter->dev, enabled);
}
+
+static int mwifiex_set_rekey_data(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ return mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
+ HostCmd_ACT_GEN_SET, 0, data, true);
+}
+
#endif
static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq)
@@ -3801,7 +3822,7 @@ static int mwifiex_cfg80211_get_channel(struct wiphy *wiphy,
struct ieee80211_channel *chan;
u8 second_chan_offset;
enum nl80211_channel_type chan_type;
- enum ieee80211_band band;
+ enum nl80211_band band;
int freq;
int ret = -ENODATA;
@@ -3932,6 +3953,7 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.suspend = mwifiex_cfg80211_suspend,
.resume = mwifiex_cfg80211_resume,
.set_wakeup = mwifiex_cfg80211_set_wakeup,
+ .set_rekey_data = mwifiex_set_rekey_data,
#endif
.set_coalesce = mwifiex_cfg80211_set_coalesce,
.tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
@@ -3948,7 +3970,8 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_NET_DETECT,
+ WIPHY_WOWLAN_NET_DETECT | WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
.n_patterns = MWIFIEX_MEF_MAX_FILTERS,
.pattern_min_len = 1,
.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
@@ -4031,11 +4054,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_AP);
- wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz;
+ wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
if (adapter->config_bands & BAND_A)
- wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz;
+ wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
else
- wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+ wiphy->bands[NL80211_BAND_5GHZ] = NULL;
if (adapter->drcs_enabled && ISSUPP_DRCS_ENABLED(adapter->fw_cap_info))
wiphy->iface_combinations = &mwifiex_iface_comb_ap_sta_drcs;
@@ -4086,6 +4109,7 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->features |= NL80211_FEATURE_HT_IBSS |
NL80211_FEATURE_INACTIVITY_TIMER |
+ NL80211_FEATURE_LOW_PRIORITY_SCAN |
NL80211_FEATURE_NEED_OBSS_SCAN;
if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
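Read together, the mwifiex cfg80211.c hunks above wire up WoWLAN GTK
rekey offload end to end; a summary of the three cooperating pieces:

	/* 1. .set_rekey_data forwards the KEK/KCK/replay counter to
	 *    firmware via HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG;
	 * 2. on resume, a GTK_REKEY_FAILURE wakeup reason is translated
	 *    into wakeup_report.gtk_rekey_failure before reporting;
	 * 3. the wiphy advertises WIPHY_WOWLAN_SUPPORTS_GTK_REKEY and
	 *    WIPHY_WOWLAN_GTK_REKEY_FAILURE so userspace can rely on it.
	 */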
diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
index 09fae2714..1ff22055e 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfp.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
@@ -322,9 +322,9 @@ mwifiex_get_cfp(struct mwifiex_private *priv, u8 band, u16 channel, u32 freq)
return cfp;
if (mwifiex_band_to_radio_type(band) == HostCmd_SCAN_RADIO_TYPE_BG)
- sband = priv->wdev.wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = priv->wdev.wiphy->bands[NL80211_BAND_2GHZ];
else
- sband = priv->wdev.wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = priv->wdev.wiphy->bands[NL80211_BAND_5GHZ];
if (!sband) {
mwifiex_dbg(priv->adapter, ERROR,
@@ -399,15 +399,15 @@ u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
int i;
if (radio_type) {
- sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
if (WARN_ON_ONCE(!sband))
return 0;
- rate_mask = request->rates[IEEE80211_BAND_5GHZ];
+ rate_mask = request->rates[NL80211_BAND_5GHZ];
} else {
- sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
if (WARN_ON_ONCE(!sband))
return 0;
- rate_mask = request->rates[IEEE80211_BAND_2GHZ];
+ rate_mask = request->rates[NL80211_BAND_2GHZ];
}
num_rates = 0;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index a12adee77..6bc2011d8 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -105,6 +105,47 @@ mwifiex_clean_cmd_node(struct mwifiex_adapter *adapter,
}
/*
+ * This function returns a command to the command free queue.
+ *
+ * The function also calls the completion callback if required, before
+ * cleaning the command node and re-inserting it into the free queue.
+ */
+static void
+mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
+ struct cmd_ctrl_node *cmd_node)
+{
+ unsigned long flags;
+
+ if (!cmd_node)
+ return;
+
+ if (cmd_node->wait_q_enabled)
+ mwifiex_complete_cmd(adapter, cmd_node);
+ /* Clean the node */
+ mwifiex_clean_cmd_node(adapter, cmd_node);
+
+ /* Insert node into cmd_free_q */
+ spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
+ list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
+ spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
+}
+
+/* This function reuses a command node. */
+void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
+ struct cmd_ctrl_node *cmd_node)
+{
+ struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data;
+
+ mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+
+ atomic_dec(&adapter->cmd_pending);
+ mwifiex_dbg(adapter, CMD,
+ "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
+ le16_to_cpu(host_cmd->command),
+ atomic_read(&adapter->cmd_pending));
+}
+
+/*
* This function sends a host command to the firmware.
*
* The function copies the host command into the driver command
@@ -614,47 +655,6 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
}
/*
- * This function returns a command to the command free queue.
- *
- * The function also calls the completion callback if required, before
- * cleaning the command node and re-inserting it into the free queue.
- */
-void
-mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
- struct cmd_ctrl_node *cmd_node)
-{
- unsigned long flags;
-
- if (!cmd_node)
- return;
-
- if (cmd_node->wait_q_enabled)
- mwifiex_complete_cmd(adapter, cmd_node);
- /* Clean the node */
- mwifiex_clean_cmd_node(adapter, cmd_node);
-
- /* Insert node into cmd_free_q */
- spin_lock_irqsave(&adapter->cmd_free_q_lock, flags);
- list_add_tail(&cmd_node->list, &adapter->cmd_free_q);
- spin_unlock_irqrestore(&adapter->cmd_free_q_lock, flags);
-}
-
-/* This function reuses a command node. */
-void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
- struct cmd_ctrl_node *cmd_node)
-{
- struct host_cmd_ds_command *host_cmd = (void *)cmd_node->cmd_skb->data;
-
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
-
- atomic_dec(&adapter->cmd_pending);
- mwifiex_dbg(adapter, CMD,
- "cmd: FREE_CMD: cmd=%#x, cmd_pending=%d\n",
- le16_to_cpu(host_cmd->command),
- atomic_read(&adapter->cmd_pending));
-}
-
-/*
* This function queues a command to the command pending queue.
*
* This in effect adds the command to the command list to be executed.
@@ -991,6 +991,23 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
adapter->if_ops.card_reset(adapter);
}
+void
+mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter)
+{
+ struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
+ unsigned long flags;
+
+ /* Cancel all pending scan command */
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ list_for_each_entry_safe(cmd_node, tmp_node,
+ &adapter->scan_pending_q, list) {
+ list_del(&cmd_node->list);
+ cmd_node->wait_q_enabled = false;
+ mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
+ }
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+}
+
/*
* This function cancels all the pending commands.
*
@@ -1009,9 +1026,9 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
/* Cancel current cmd */
if ((adapter->curr_cmd) && (adapter->curr_cmd->wait_q_enabled)) {
- adapter->curr_cmd->wait_q_enabled = false;
adapter->cmd_wait_q.status = -1;
mwifiex_complete_cmd(adapter, adapter->curr_cmd);
+ adapter->curr_cmd->wait_q_enabled = false;
/* no recycle probably wait for response */
}
/* Cancel all pending command */
@@ -1029,16 +1046,7 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
spin_unlock_irqrestore(&adapter->cmd_pending_q_lock, flags);
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, cmd_flags);
- /* Cancel all pending scan command */
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q, list) {
- list_del(&cmd_node->list);
-
- cmd_node->wait_q_enabled = false;
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- }
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ mwifiex_cancel_pending_scan_cmd(adapter);
if (adapter->scan_processing) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
@@ -1070,9 +1078,8 @@ mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter)
void
mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
{
- struct cmd_ctrl_node *cmd_node = NULL, *tmp_node = NULL;
+ struct cmd_ctrl_node *cmd_node = NULL;
unsigned long cmd_flags;
- unsigned long scan_pending_q_flags;
struct mwifiex_private *priv;
int i;
@@ -1094,17 +1101,7 @@ mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter)
mwifiex_recycle_cmd_node(adapter, cmd_node);
}
- /* Cancel all pending scan command */
- spin_lock_irqsave(&adapter->scan_pending_q_lock,
- scan_pending_q_flags);
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q, list) {
- list_del(&cmd_node->list);
- cmd_node->wait_q_enabled = false;
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- }
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- scan_pending_q_flags);
+ mwifiex_cancel_pending_scan_cmd(adapter);
if (adapter->scan_processing) {
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, cmd_flags);
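The cmdevt.c reshuffle above is a pure deduplication: the identical
scan_pending_q flush loops in mwifiex_cancel_all_pending_cmd() and
mwifiex_cancel_pending_ioctl() move into one shared helper, so both
callers reduce to a single line:

	/* walks scan_pending_q under its lock and returns every node
	 * to the command free queue */
	mwifiex_cancel_pending_scan_cmd(adapter);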
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index c134cf865..8e4145abd 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -372,6 +372,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_COALESCE_CFG 0x010a
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
+#define HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG 0x010f
#define HostCmd_CMD_11AC_CFG 0x0112
#define HostCmd_CMD_HS_WAKEUP_REASON 0x0116
#define HostCmd_CMD_TDLS_CONFIG 0x0100
@@ -619,6 +620,7 @@ enum HS_WAKEUP_REASON {
MAGIC_PATTERN_MATCHED,
CONTROL_FRAME_MATCHED,
MANAGEMENT_FRAME_MATCHED,
+ GTK_REKEY_FAILURE,
RESERVED
};
@@ -2183,6 +2185,14 @@ struct host_cmd_ds_wakeup_reason {
u16 wakeup_reason;
} __packed;
+struct host_cmd_ds_gtk_rekey_params {
+ __le16 action;
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KEK_LEN];
+ __le32 replay_ctr_low;
+ __le32 replay_ctr_high;
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -2256,6 +2266,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_multi_chan_policy mc_policy;
struct host_cmd_ds_robust_coex coex;
struct host_cmd_ds_wakeup_reason hs_wakeup_reason;
+ struct host_cmd_ds_gtk_rekey_params rekey;
} params;
} __packed;
diff --git a/drivers/net/wireless/marvell/mwifiex/init.c b/drivers/net/wireless/marvell/mwifiex/init.c
index 517653b3a..78c532f0d 100644
--- a/drivers/net/wireless/marvell/mwifiex/init.c
+++ b/drivers/net/wireless/marvell/mwifiex/init.c
@@ -317,7 +317,7 @@ void mwifiex_set_trans_start(struct net_device *dev)
for (i = 0; i < dev->num_tx_queues; i++)
netdev_get_tx_queue(dev, i)->trans_start = jiffies;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 2d40cb76c..4dde1ae35 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -702,6 +702,13 @@ mwifiex_close(struct net_device *dev)
priv->scan_aborting = true;
}
+ if (priv->sched_scanning) {
+ mwifiex_dbg(priv->adapter, INFO,
+ "aborting bgscan on ndo_stop\n");
+ mwifiex_stop_bg_scan(priv);
+ cfg80211_sched_scan_stopped(priv->wdev.wiphy);
+ }
+
return 0;
}
@@ -753,13 +760,6 @@ int mwifiex_queue_tx_pkt(struct mwifiex_private *priv, struct sk_buff *skb)
mwifiex_queue_main_work(priv->adapter);
- if (priv->sched_scanning) {
- mwifiex_dbg(priv->adapter, INFO,
- "aborting bgscan on ndo_stop\n");
- mwifiex_stop_bg_scan(priv);
- cfg80211_sched_scan_stopped(priv->wdev.wiphy);
- }
-
return 0;
}
@@ -1074,12 +1074,14 @@ void mwifiex_drv_info_dump(struct mwifiex_adapter *adapter)
priv->netdev->name, priv->num_tx_timeout);
}
- if (adapter->iface_type == MWIFIEX_SDIO) {
- p += sprintf(p, "\n=== SDIO register dump===\n");
+ if (adapter->iface_type == MWIFIEX_SDIO ||
+ adapter->iface_type == MWIFIEX_PCIE) {
+ p += sprintf(p, "\n=== %s register dump===\n",
+ adapter->iface_type == MWIFIEX_SDIO ?
+ "SDIO" : "PCIE");
if (adapter->if_ops.reg_dump)
p += adapter->if_ops.reg_dump(adapter, p);
}
-
p += sprintf(p, "\n=== more debug information\n");
debug_info = kzalloc(sizeof(*debug_info), GFP_KERNEL);
if (debug_info) {
@@ -1432,7 +1434,7 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
struct mwifiex_private *priv = NULL;
int i;
- if (down_interruptible(sem))
+ if (down_trylock(sem))
goto exit_sem_err;
if (!adapter)
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index aafc4ab4e..0207af00b 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -37,6 +37,17 @@
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/devcoredump.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
#include "decl.h"
#include "ioctl.h"
@@ -100,8 +111,8 @@ enum {
#define SCAN_BEACON_ENTRY_PAD 6
#define MWIFIEX_PASSIVE_SCAN_CHAN_TIME 110
-#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 30
-#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 30
+#define MWIFIEX_ACTIVE_SCAN_CHAN_TIME 40
+#define MWIFIEX_SPECIFIC_SCAN_CHAN_TIME 40
#define MWIFIEX_DEF_SCAN_CHAN_GAP_TIME 50
#define SCAN_RSSI(RSSI) (0x100 - ((u8)(RSSI)))
@@ -1019,6 +1030,8 @@ int mwifiex_shutdown_fw_complete(struct mwifiex_adapter *adapter);
int mwifiex_dnld_fw(struct mwifiex_adapter *, struct mwifiex_fw_image *);
int mwifiex_recv_packet(struct mwifiex_private *priv, struct sk_buff *skb);
+int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
+ struct sk_buff *skb);
int mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
struct sk_buff *skb);
@@ -1040,9 +1053,8 @@ int mwifiex_alloc_cmd_buffer(struct mwifiex_adapter *adapter);
int mwifiex_free_cmd_buffer(struct mwifiex_adapter *adapter);
void mwifiex_cancel_all_pending_cmd(struct mwifiex_adapter *adapter);
void mwifiex_cancel_pending_ioctl(struct mwifiex_adapter *adapter);
+void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter);
-void mwifiex_insert_cmd_to_free_q(struct mwifiex_adapter *adapter,
- struct cmd_ctrl_node *cmd_node);
void mwifiex_recycle_cmd_node(struct mwifiex_adapter *adapter,
struct cmd_ctrl_node *cmd_node);
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index 106717dd8..0c7937eb6 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -190,7 +190,6 @@ static int mwifiex_pcie_probe(struct pci_dev *pdev,
if (ent->driver_data) {
struct mwifiex_pcie_device *data = (void *)ent->driver_data;
- card->pcie.firmware = data->firmware;
card->pcie.reg = data->reg;
card->pcie.blksz_fw_dl = data->blksz_fw_dl;
card->pcie.tx_buf_size = data->tx_buf_size;
@@ -269,6 +268,11 @@ static const struct pci_device_id mwifiex_ids[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
.driver_data = (unsigned long)&mwifiex_pcie8997,
},
+ {
+ PCIE_VENDOR_ID_V2_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8997,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ .driver_data = (unsigned long)&mwifiex_pcie8997,
+ },
{},
};
@@ -2351,6 +2355,47 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
return 0;
}
+/* Function to dump PCIE scratch registers in case of FW crash
+ */
+static int
+mwifiex_pcie_reg_dump(struct mwifiex_adapter *adapter, char *drv_buf)
+{
+ char *p = drv_buf;
+ char buf[256], *ptr;
+ int i;
+ u32 value;
+ struct pcie_service_card *card = adapter->card;
+ const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ int pcie_scratch_reg[] = {PCIE_SCRATCH_12_REG,
+ PCIE_SCRATCH_13_REG,
+ PCIE_SCRATCH_14_REG};
+
+ if (!p)
+ return 0;
+
+ mwifiex_dbg(adapter, MSG, "PCIE register dump start\n");
+
+ if (mwifiex_read_reg(adapter, reg->fw_status, &value)) {
+ mwifiex_dbg(adapter, ERROR, "failed to read firmware status");
+ return 0;
+ }
+
+ ptr = buf;
+ mwifiex_dbg(adapter, MSG, "pcie scratch register:");
+ for (i = 0; i < ARRAY_SIZE(pcie_scratch_reg); i++) {
+ mwifiex_read_reg(adapter, pcie_scratch_reg[i], &value);
+ ptr += sprintf(ptr, "reg:0x%x, value=0x%x\n",
+ pcie_scratch_reg[i], value);
+ }
+
+ mwifiex_dbg(adapter, MSG, "%s\n", buf);
+ p += sprintf(p, "%s\n", buf);
+
+ mwifiex_dbg(adapter, MSG, "PCIE register dump end\n");
+
+ return p - drv_buf;
+}
+
/* This function read/write firmware */
static enum rdwr_status
mwifiex_pcie_rdwr_firmware(struct mwifiex_adapter *adapter, u8 doneflag)
@@ -2759,6 +2804,68 @@ static int mwifiex_pcie_request_irq(struct mwifiex_adapter *adapter)
}
/*
+ * This function gets the firmware name for downloading by revision id
+ *
+ * Read the revision id register to get the revision id
+ */
+static void mwifiex_pcie_get_fw_name(struct mwifiex_adapter *adapter)
+{
+ int revision_id = 0;
+ int version;
+ struct pcie_service_card *card = adapter->card;
+
+ switch (card->dev->device) {
+ case PCIE_DEVICE_ID_MARVELL_88W8766P:
+ strcpy(adapter->fw_name, PCIE8766_DEFAULT_FW_NAME);
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8897:
+ mwifiex_write_reg(adapter, 0x0c58, 0x80c00000);
+ mwifiex_read_reg(adapter, 0x0c58, &revision_id);
+ revision_id &= 0xff00;
+ switch (revision_id) {
+ case PCIE8897_A0:
+ strcpy(adapter->fw_name, PCIE8897_A0_FW_NAME);
+ break;
+ case PCIE8897_B0:
+ strcpy(adapter->fw_name, PCIE8897_B0_FW_NAME);
+ break;
+ default:
+ strcpy(adapter->fw_name, PCIE8897_DEFAULT_FW_NAME);
+
+ break;
+ }
+ break;
+ case PCIE_DEVICE_ID_MARVELL_88W8997:
+ mwifiex_read_reg(adapter, 0x0c48, &revision_id);
+ mwifiex_read_reg(adapter, 0x0cd0, &version);
+ version &= 0x7;
+ switch (revision_id) {
+ case PCIE8997_V2:
+ if (version == CHIP_VER_PCIEUSB)
+ strcpy(adapter->fw_name,
+ PCIEUSB8997_FW_NAME_V2);
+ else
+ strcpy(adapter->fw_name,
+ PCIEUART8997_FW_NAME_V2);
+ break;
+ case PCIE8997_Z:
+ if (version == CHIP_VER_PCIEUSB)
+ strcpy(adapter->fw_name,
+ PCIEUSB8997_FW_NAME_Z);
+ else
+ strcpy(adapter->fw_name,
+ PCIEUART8997_FW_NAME_Z);
+ break;
+ default:
+ strcpy(adapter->fw_name, PCIE8997_DEFAULT_FW_NAME);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/*
* This function registers the PCIE device.
*
* PCIE IRQ is claimed, block size is set and driver data is initialized.
@@ -2778,8 +2885,8 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
adapter->tx_buf_size = card->pcie.tx_buf_size;
adapter->mem_type_mapping_tbl = card->pcie.mem_type_mapping_tbl;
adapter->num_mem_types = card->pcie.num_mem_types;
- strcpy(adapter->fw_name, card->pcie.firmware);
adapter->ext_scan = card->pcie.can_ext_scan;
+ mwifiex_pcie_get_fw_name(adapter);
return 0;
}
@@ -2850,6 +2957,7 @@ static struct mwifiex_if_ops pcie_ops = {
.cleanup_mpa_buf = NULL,
.init_fw_port = mwifiex_pcie_init_fw_port,
.clean_pcie_ring = mwifiex_clean_pcie_ring_buf,
+ .reg_dump = mwifiex_pcie_reg_dump,
.device_dump = mwifiex_pcie_device_dump,
};
@@ -2907,4 +3015,3 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex PCI-Express Driver version " PCIE_VERSION);
MODULE_VERSION(PCIE_VERSION);
MODULE_LICENSE("GPL v2");
-/*(DEBLOBBED)*/
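With the static .firmware field gone from struct mwifiex_pcie_device,
the image is now chosen at registration time by probing revision
registers. A trimmed sketch of the 8897 branch from the hunk above
(the default fallback is elided; the firmware names are deblobbed
placeholders in this tree):

	mwifiex_write_reg(adapter, 0x0c58, 0x80c00000);
	mwifiex_read_reg(adapter, 0x0c58, &revision_id);
	revision_id &= 0xff00;

	if (revision_id == PCIE8897_A0)
		strcpy(adapter->fw_name, PCIE8897_A0_FW_NAME);
	else if (revision_id == PCIE8897_B0)
		strcpy(adapter->fw_name, PCIE8897_B0_FW_NAME);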
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.h b/drivers/net/wireless/marvell/mwifiex/pcie.h
index e55002806..db18ac6f5 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.h
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.h
@@ -23,7 +23,6 @@
#define _MWIFIEX_PCIE_H
#include <linux/pci.h>
-#include <linux/pcieport_if.h>
#include <linux/interrupt.h>
#include "decl.h"
@@ -31,13 +30,26 @@
#define PCIE8766_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
#define PCIE8897_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
+#define PCIE8897_A0_FW_NAME "/*(DEBLOBBED)*/"
+#define PCIE8897_B0_FW_NAME "/*(DEBLOBBED)*/"
#define PCIE8997_DEFAULT_FW_NAME "/*(DEBLOBBED)*/"
+#define PCIEUART8997_FW_NAME_Z "/*(DEBLOBBED)*/"
+#define PCIEUART8997_FW_NAME_V2 "/*(DEBLOBBED)*/"
+#define PCIEUSB8997_FW_NAME_Z "/*(DEBLOBBED)*/"
+#define PCIEUSB8997_FW_NAME_V2 "/*(DEBLOBBED)*/"
#define PCIE_VENDOR_ID_MARVELL (0x11ab)
+#define PCIE_VENDOR_ID_V2_MARVELL (0x1b4b)
#define PCIE_DEVICE_ID_MARVELL_88W8766P (0x2b30)
#define PCIE_DEVICE_ID_MARVELL_88W8897 (0x2b38)
#define PCIE_DEVICE_ID_MARVELL_88W8997 (0x2b42)
+#define PCIE8897_A0 0x1100
+#define PCIE8897_B0 0x1200
+#define PCIE8997_Z 0x0
+#define PCIE8997_V2 0x471
+#define CHIP_VER_PCIEUSB 0x2
+
/* Constants for Buffer Descriptor (BD) rings */
#define MWIFIEX_MAX_TXRX_BD 0x20
#define MWIFIEX_TXBD_MASK 0x3F
@@ -65,6 +77,8 @@
#define PCIE_SCRATCH_10_REG 0xCE8
#define PCIE_SCRATCH_11_REG 0xCEC
#define PCIE_SCRATCH_12_REG 0xCF0
+#define PCIE_SCRATCH_13_REG 0xCF8
+#define PCIE_SCRATCH_14_REG 0xCFC
#define PCIE_RD_DATA_PTR_Q0_Q1 0xC08C
#define PCIE_WR_DATA_PTR_Q0_Q1 0xC05C
@@ -102,7 +116,7 @@
/* FW awake cookie after FW ready */
#define FW_AWAKE_COOKIE (0xAA55AA55)
#define MWIFIEX_DEF_SLEEP_COOKIE 0xBEEFBEEF
-#define MWIFIEX_MAX_DELAY_COUNT 5
+#define MWIFIEX_MAX_DELAY_COUNT 100
struct mwifiex_pcie_card_reg {
u16 cmd_addr_lo;
@@ -263,7 +277,6 @@ static struct memory_type_mapping mem_type_mapping_tbl_w8997[] = {
};
struct mwifiex_pcie_device {
- const char *firmware;
const struct mwifiex_pcie_card_reg *reg;
u16 blksz_fw_dl;
u16 tx_buf_size;
@@ -274,7 +287,6 @@ struct mwifiex_pcie_device {
};
static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
- .firmware = PCIE8766_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_8766,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K,
@@ -283,7 +295,6 @@ static const struct mwifiex_pcie_device mwifiex_pcie8766 = {
};
static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
- .firmware = PCIE8897_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_8897,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
@@ -294,7 +305,6 @@ static const struct mwifiex_pcie_device mwifiex_pcie8897 = {
};
static const struct mwifiex_pcie_device mwifiex_pcie8997 = {
- .firmware = PCIE8997_DEFAULT_FW_NAME,
.reg = &mwifiex_reg_8997,
.blksz_fw_dl = MWIFIEX_PCIE_BLOCK_SIZE_FW_DNLD,
.tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K,
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 489f7a911..bc5e52ceb 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -76,6 +76,39 @@ static u8 mwifiex_rsn_oui[CIPHER_SUITE_MAX][4] = {
{ 0x00, 0x0f, 0xac, 0x04 }, /* AES */
};
+static void
+_dbg_security_flags(int log_level, const char *func, const char *desc,
+ struct mwifiex_private *priv,
+ struct mwifiex_bssdescriptor *bss_desc)
+{
+ _mwifiex_dbg(priv->adapter, log_level,
+ "info: %s: %s:\twpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\tEncMode=%#x privacy=%#x\n",
+ func, desc,
+ bss_desc->bcn_wpa_ie ?
+ bss_desc->bcn_wpa_ie->vend_hdr.element_id : 0,
+ bss_desc->bcn_rsn_ie ?
+ bss_desc->bcn_rsn_ie->ieee_hdr.element_id : 0,
+ priv->sec_info.wep_enabled ? "e" : "d",
+ priv->sec_info.wpa_enabled ? "e" : "d",
+ priv->sec_info.wpa2_enabled ? "e" : "d",
+ priv->sec_info.encryption_mode,
+ bss_desc->privacy);
+}
+#define dbg_security_flags(mask, desc, priv, bss_desc) \
+ _dbg_security_flags(MWIFIEX_DBG_##mask, __func__, desc, priv, bss_desc)
+
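Editorial note: dbg_security_flags() collapses several near-identical security debug prints into one helper; the macro pastes the severity token onto MWIFIEX_DBG_ and supplies the caller's function name (the argument order here is fixed to match the _dbg_security_flags() signature above). A typical call and its expansion, for illustration:

	dbg_security_flags(INFO, "WPA", priv, bss_desc);
	/* expands to: */
	_dbg_security_flags(MWIFIEX_DBG_INFO, __func__, "WPA", priv, bss_desc);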
+static bool
+has_ieee_hdr(struct ieee_types_generic *ie, u8 key)
+{
+ return (ie && ie->ieee_hdr.element_id == key);
+}
+
+static bool
+has_vendor_hdr(struct ieee_types_vendor_specific *ie, u8 key)
+{
+ return (ie && ie->vend_hdr.element_id == key);
+}
+
/*
* This function parses a given IE for a given OUI.
*
@@ -121,8 +154,7 @@ mwifiex_is_rsn_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
struct ie_body *iebody;
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
- if (((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).
- ieee_hdr.element_id == WLAN_EID_RSN))) {
+ if (has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN)) {
iebody = (struct ie_body *)
(((u8 *) bss_desc->bcn_rsn_ie->data) +
RSN_GTK_OUI_OFFSET);
@@ -148,9 +180,7 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
struct ie_body *iebody;
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
- if (((bss_desc->bcn_wpa_ie) &&
- ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id ==
- WLAN_EID_VENDOR_SPECIFIC))) {
+ if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
oui = &mwifiex_wpa_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
@@ -180,11 +210,8 @@ mwifiex_is_bss_wapi(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (priv->sec_info.wapi_enabled &&
- (bss_desc->bcn_wapi_ie &&
- ((*(bss_desc->bcn_wapi_ie)).ieee_hdr.element_id ==
- WLAN_EID_BSS_AC_ACCESS_DELAY))) {
+ has_ieee_hdr(bss_desc->bcn_wapi_ie, WLAN_EID_BSS_AC_ACCESS_DELAY))
return true;
- }
return false;
}
@@ -197,12 +224,9 @@ mwifiex_is_bss_no_sec(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
- !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) ||
- ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id !=
- WLAN_EID_VENDOR_SPECIFIC)) &&
- ((!bss_desc->bcn_rsn_ie) ||
- ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id !=
- WLAN_EID_RSN)) &&
+ !priv->sec_info.wpa2_enabled &&
+ !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+ !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
!priv->sec_info.encryption_mode && !bss_desc->privacy) {
return true;
}
@@ -233,29 +257,14 @@ mwifiex_is_bss_wpa(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled &&
- !priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) &&
- ((*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC))
+ !priv->sec_info.wpa2_enabled &&
+ has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)
/*
* Privacy bit may NOT be set in some APs like
* LinkSys WRT54G && bss_desc->privacy
*/
) {
- mwifiex_dbg(priv->adapter, INFO,
- "info: %s: WPA:\t"
- "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
- "EncMode=%#x privacy=%#x\n", __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*bss_desc->bcn_wpa_ie).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*bss_desc->bcn_rsn_ie).
- ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ dbg_security_flags(INFO, "WPA", priv, bss_desc);
return true;
}
return false;
@@ -269,30 +278,14 @@ static bool
mwifiex_is_bss_wpa2(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc)
{
- if (!priv->sec_info.wep_enabled &&
- !priv->sec_info.wpa_enabled &&
+ if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
priv->sec_info.wpa2_enabled &&
- ((bss_desc->bcn_rsn_ie) &&
- ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id == WLAN_EID_RSN))) {
+ has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN)) {
/*
* Privacy bit may NOT be set in some APs like
* LinkSys WRT54G && bss_desc->privacy
*/
- mwifiex_dbg(priv->adapter, INFO,
- "info: %s: WPA2:\t"
- "wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s\t"
- "EncMode=%#x privacy=%#x\n", __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*bss_desc->bcn_wpa_ie).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*bss_desc->bcn_rsn_ie).
- ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ dbg_security_flags(INFO, "WAP2", priv, bss_desc);
return true;
}
return false;
@@ -308,11 +301,8 @@ mwifiex_is_bss_adhoc_aes(struct mwifiex_private *priv,
{
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled &&
- ((!bss_desc->bcn_wpa_ie) ||
- ((*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
- ((!bss_desc->bcn_rsn_ie) ||
- ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
+ !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+ !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
!priv->sec_info.encryption_mode && bss_desc->privacy) {
return true;
}
@@ -329,25 +319,10 @@ mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv,
{
if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled &&
!priv->sec_info.wpa2_enabled &&
- ((!bss_desc->bcn_wpa_ie) ||
- ((*(bss_desc->bcn_wpa_ie)).
- vend_hdr.element_id != WLAN_EID_VENDOR_SPECIFIC)) &&
- ((!bss_desc->bcn_rsn_ie) ||
- ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) &&
+ !has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC) &&
+ !has_ieee_hdr(bss_desc->bcn_rsn_ie, WLAN_EID_RSN) &&
priv->sec_info.encryption_mode && bss_desc->privacy) {
- mwifiex_dbg(priv->adapter, INFO,
- "info: %s: dynamic\t"
- "WEP: wpa_ie=%#x wpa2_ie=%#x\t"
- "EncMode=%#x privacy=%#x\n",
- __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*bss_desc->bcn_wpa_ie).
- vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*bss_desc->bcn_rsn_ie).
- ieee_hdr.element_id : 0,
- priv->sec_info.encryption_mode,
- bss_desc->privacy);
+ dbg_security_flags(INFO, "dynamic", priv, bss_desc);
return true;
}
return false;
@@ -460,18 +435,7 @@ mwifiex_is_network_compatible(struct mwifiex_private *priv,
}
/* Security doesn't match */
- mwifiex_dbg(adapter, ERROR,
- "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s\t"
- "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n",
- __func__,
- (bss_desc->bcn_wpa_ie) ?
- (*bss_desc->bcn_wpa_ie).vend_hdr.element_id : 0,
- (bss_desc->bcn_rsn_ie) ?
- (*bss_desc->bcn_rsn_ie).ieee_hdr.element_id : 0,
- (priv->sec_info.wep_enabled) ? "e" : "d",
- (priv->sec_info.wpa_enabled) ? "e" : "d",
- (priv->sec_info.wpa2_enabled) ? "e" : "d",
- priv->sec_info.encryption_mode, bss_desc->privacy);
+ dbg_security_flags(ERROR, "failed", priv, bss_desc);
return -1;
}
@@ -494,13 +458,13 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
*scan_chan_list,
u8 filtered_scan)
{
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
struct mwifiex_adapter *adapter = priv->adapter;
int chan_idx = 0, i;
- for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
+ for (band = 0; (band < NUM_NL80211_BANDS) ; band++) {
if (!priv->wdev.wiphy->bands[band])
continue;
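Editorial note: the enum ieee80211_band to nl80211_band conversion seen here recurs through the rest of this patch (mwifiex, mwl8k, mt7601u, rt2x00). It is a tree-wide mechanical rename with identical enumerator values, so no behavior changes; only the spelling does:

	enum nl80211_band band;			/* was: enum ieee80211_band */

	for (band = 0; band < NUM_NL80211_BANDS; band++)	/* was: IEEE80211_NUM_BANDS */
		if (priv->wdev.wiphy->bands[band])
			setup_band(priv, band);	/* hypothetical per-band hook */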
@@ -534,11 +498,13 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
&= ~MWIFIEX_PASSIVE_SCAN;
scan_chan_list[chan_idx].chan_number =
(u32) ch->hw_value;
+
+ scan_chan_list[chan_idx].chan_scan_mode_bitmap
+ |= MWIFIEX_DISABLE_CHAN_FILT;
+
if (filtered_scan) {
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->specific_scan_time);
- scan_chan_list[chan_idx].chan_scan_mode_bitmap
- |= MWIFIEX_DISABLE_CHAN_FILT;
}
chan_idx++;
}
@@ -557,13 +523,13 @@ mwifiex_bgscan_create_channel_list(struct mwifiex_private *priv,
struct mwifiex_chan_scan_param_set
*scan_chan_list)
{
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
struct mwifiex_adapter *adapter = priv->adapter;
int chan_idx = 0, i;
- for (band = 0; (band < IEEE80211_NUM_BANDS); band++) {
+ for (band = 0; (band < NUM_NL80211_BANDS); band++) {
if (!priv->wdev.wiphy->bands[band])
continue;
@@ -655,8 +621,6 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
int ret = 0;
struct mwifiex_chan_scan_param_set *tmp_chan_list;
struct mwifiex_chan_scan_param_set *start_chan;
- struct cmd_ctrl_node *cmd_node, *tmp_node;
- unsigned long flags;
u32 tlv_idx, rates_size, cmd_no;
u32 total_scan_time;
u32 done_early;
@@ -813,16 +777,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
sizeof(struct mwifiex_ie_types_header) + rates_size;
if (ret) {
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q,
- list) {
- list_del(&cmd_node->list);
- cmd_node->wait_q_enabled = false;
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- }
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
+ mwifiex_cancel_pending_scan_cmd(adapter);
break;
}
}
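Editorial note: mwifiex_cancel_pending_scan_cmd() replaces this and two later open-coded walks of scan_pending_q (see the later scan.c hunk and the sta_cmdresp.c hunk). The helper itself is introduced elsewhere in the patch; inferred from the loops it replaces, it amounts to:

	/* Sketch inferred from the removed call sites, not the actual body. */
	void mwifiex_cancel_pending_scan_cmd(struct mwifiex_adapter *adapter)
	{
		struct cmd_ctrl_node *cmd_node, *tmp_node;
		unsigned long flags;

		spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
		list_for_each_entry_safe(cmd_node, tmp_node,
					 &adapter->scan_pending_q, list) {
			list_del(&cmd_node->list);
			cmd_node->wait_q_enabled = false;
			mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
		}
		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
	}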
@@ -912,14 +867,11 @@ mwifiex_config_scan(struct mwifiex_private *priv,
/* Set the BSS type scan filter, use Adapter setting if
unset */
scan_cfg_out->bss_mode =
- (user_scan_in->bss_mode ? (u8) user_scan_in->
- bss_mode : (u8) adapter->scan_mode);
+ (u8)(user_scan_in->bss_mode ?: adapter->scan_mode);
/* Set the number of probes to send, use Adapter setting
if unset */
- num_probes =
- (user_scan_in->num_probes ? user_scan_in->
- num_probes : adapter->scan_probes);
+ num_probes = user_scan_in->num_probes ?: adapter->scan_probes;
/*
* Set the BSSID filter to the incoming configuration,
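Editorial note: both assignments above use GCC's conditional-with-omitted-middle-operand extension, common in kernel code for "value or fallback": a ?: b means a ? a : b with a evaluated only once. The equivalent long form of the second assignment:

	num_probes = user_scan_in->num_probes ?
		     user_scan_in->num_probes : adapter->scan_probes;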
@@ -1094,28 +1046,24 @@ mwifiex_config_scan(struct mwifiex_private *priv,
chan_idx++) {
channel = user_scan_in->chan_list[chan_idx].chan_number;
- (scan_chan_list + chan_idx)->chan_number = channel;
+ scan_chan_list[chan_idx].chan_number = channel;
radio_type =
user_scan_in->chan_list[chan_idx].radio_type;
- (scan_chan_list + chan_idx)->radio_type = radio_type;
+ scan_chan_list[chan_idx].radio_type = radio_type;
scan_type = user_scan_in->chan_list[chan_idx].scan_type;
if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
- (scan_chan_list +
- chan_idx)->chan_scan_mode_bitmap
+ scan_chan_list[chan_idx].chan_scan_mode_bitmap
|= (MWIFIEX_PASSIVE_SCAN |
MWIFIEX_HIDDEN_SSID_REPORT);
else
- (scan_chan_list +
- chan_idx)->chan_scan_mode_bitmap
+ scan_chan_list[chan_idx].chan_scan_mode_bitmap
&= ~MWIFIEX_PASSIVE_SCAN;
- if (*filtered_scan)
- (scan_chan_list +
- chan_idx)->chan_scan_mode_bitmap
- |= MWIFIEX_DISABLE_CHAN_FILT;
+ scan_chan_list[chan_idx].chan_scan_mode_bitmap
+ |= MWIFIEX_DISABLE_CHAN_FILT;
if (user_scan_in->chan_list[chan_idx].scan_time) {
scan_dur = (u16) user_scan_in->
@@ -1129,9 +1077,9 @@ mwifiex_config_scan(struct mwifiex_private *priv,
scan_dur = adapter->active_scan_time;
}
- (scan_chan_list + chan_idx)->min_scan_time =
+ scan_chan_list[chan_idx].min_scan_time =
cpu_to_le16(scan_dur);
- (scan_chan_list + chan_idx)->max_scan_time =
+ scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(scan_dur);
}
@@ -1991,12 +1939,13 @@ mwifiex_active_scan_req_for_passive_chan(struct mwifiex_private *priv)
static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
{
struct mwifiex_adapter *adapter = priv->adapter;
- struct cmd_ctrl_node *cmd_node, *tmp_node;
+ struct cmd_ctrl_node *cmd_node;
unsigned long flags;
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
if (list_empty(&adapter->scan_pending_q)) {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
@@ -2018,13 +1967,10 @@ static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
}
} else if ((priv->scan_aborting && !priv->scan_request) ||
priv->scan_block) {
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q, list) {
- list_del(&cmd_node->list);
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- }
spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ mwifiex_cancel_pending_scan_cmd(adapter);
+
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 6b281ea03..e757dfc51 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -73,6 +73,67 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = {
{"EXTLAST", NULL, 0, 0xFE},
};
+static const struct of_device_id mwifiex_sdio_of_match_table[] = {
+ { .compatible = "marvell,sd8897" },
+ { .compatible = "marvell,sd8997" },
+ { }
+};
+
+static irqreturn_t mwifiex_wake_irq_wifi(int irq, void *priv)
+{
+ struct mwifiex_plt_wake_cfg *cfg = priv;
+
+ if (cfg->irq_wifi >= 0) {
+ pr_info("%s: wake by wifi", __func__);
+ cfg->wake_by_wifi = true;
+ disable_irq_nosync(irq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* This function parses the device tree node using the mmc subnode
+ * devicetree API. The device node is saved in card->plt_of_node.
+ * If the device tree node exists and includes an interrupts property,
+ * this function also requests the platform specific wakeup interrupt.
+ */
+static int mwifiex_sdio_probe_of(struct device *dev, struct sdio_mmc_card *card)
+{
+ struct mwifiex_plt_wake_cfg *cfg;
+ int ret;
+
+ if (!dev->of_node ||
+ !of_match_node(mwifiex_sdio_of_match_table, dev->of_node)) {
+ dev_err(dev, "sdio platform data not available\n");
+ return -1;
+ }
+
+ card->plt_of_node = dev->of_node;
+ card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg),
+ GFP_KERNEL);
+ cfg = card->plt_wake_cfg;
+ if (cfg && card->plt_of_node) {
+ cfg->irq_wifi = irq_of_parse_and_map(card->plt_of_node, 0);
+ if (!cfg->irq_wifi) {
+ dev_err(dev,
+ "fail to parse irq_wifi from device tree\n");
+ } else {
+ ret = devm_request_irq(dev, cfg->irq_wifi,
+ mwifiex_wake_irq_wifi,
+ IRQF_TRIGGER_LOW,
+ "wifi_wake", cfg);
+ if (ret) {
+ dev_err(dev,
+ "Failed to request irq_wifi %d (%d)\n",
+ cfg->irq_wifi, ret);
+ }
+ disable_irq(cfg->irq_wifi);
+ }
+ }
+
+ return 0;
+}
+
/*
* SDIO probe.
*
@@ -127,6 +188,9 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
return -EIO;
}
+ /* Device tree node parsing and platform specific configuration */
+ mwifiex_sdio_probe_of(&func->dev, card);
+
if (mwifiex_add_card(card, &add_remove_card_sem, &sdio_ops,
MWIFIEX_SDIO)) {
pr_err("%s: add card failed\n", __func__);
@@ -183,6 +247,13 @@ static int mwifiex_sdio_resume(struct device *dev)
mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
MWIFIEX_SYNC_CMD);
+ /* Disable platform specific wakeup interrupt */
+ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) {
+ disable_irq_wake(card->plt_wake_cfg->irq_wifi);
+ if (!card->plt_wake_cfg->wake_by_wifi)
+ disable_irq(card->plt_wake_cfg->irq_wifi);
+ }
+
return 0;
}
@@ -262,6 +333,13 @@ static int mwifiex_sdio_suspend(struct device *dev)
adapter = card->adapter;
+ /* Enable platform specific wakeup interrupt */
+ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_wifi >= 0) {
+ card->plt_wake_cfg->wake_by_wifi = false;
+ enable_irq(card->plt_wake_cfg->irq_wifi);
+ enable_irq_wake(card->plt_wake_cfg->irq_wifi);
+ }
+
/* Enable the Host Sleep */
if (!mwifiex_enable_hs(adapter)) {
mwifiex_dbg(adapter, ERROR,
@@ -1026,13 +1104,12 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
offset += txlen;
} while (true);
- sdio_release_host(card->func);
-
mwifiex_dbg(adapter, MSG,
"info: FW download over, size %d bytes\n", offset);
ret = 0;
done:
+ sdio_release_host(card->func);
kfree(fwbuf);
return ret;
}
@@ -1123,8 +1200,8 @@ static void mwifiex_deaggr_sdio_pkt(struct mwifiex_adapter *adapter,
__func__, pkt_len, blk_size);
break;
}
- skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len,
- GFP_KERNEL | GFP_DMA);
+
+ skb_deaggr = mwifiex_alloc_dma_align_buf(pkt_len, GFP_KERNEL);
if (!skb_deaggr)
break;
skb_put(skb_deaggr, pkt_len);
@@ -1373,8 +1450,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
/* copy pkt to deaggr buf */
skb_deaggr = mwifiex_alloc_dma_align_buf(len_arr[pind],
- GFP_KERNEL |
- GFP_DMA);
+ GFP_KERNEL);
if (!skb_deaggr) {
mwifiex_dbg(adapter, ERROR, "skb allocation failure\t"
"drop pkt len=%d type=%d\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h
index da70fde39..df6c5e57d 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.h
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.h
@@ -154,6 +154,11 @@
a->mpa_rx.start_port = 0; \
} while (0)
+struct mwifiex_plt_wake_cfg {
+ int irq_wifi;
+ bool wake_by_wifi;
+};
+
/* data structure for SDIO MPA TX */
struct mwifiex_sdio_mpa_tx {
/* multiport tx aggregation buffer pointer */
@@ -237,6 +242,8 @@ struct mwifiex_sdio_card_reg {
struct sdio_mmc_card {
struct sdio_func *func;
struct mwifiex_adapter *adapter;
+ struct device_node *plt_of_node;
+ struct mwifiex_plt_wake_cfg *plt_wake_cfg;
const char *firmware;
const struct mwifiex_sdio_card_reg *reg;
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 30f152601..e436574b1 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -1558,6 +1558,30 @@ static int mwifiex_cmd_robust_coex(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_cmd_gtk_rekey_offload(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct host_cmd_ds_gtk_rekey_params *rekey = &cmd->params.rekey;
+ u64 rekey_ctr;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG);
+ cmd->size = cpu_to_le16(sizeof(*rekey) + S_DS_GEN);
+
+ rekey->action = cpu_to_le16(cmd_action);
+ if (cmd_action == HostCmd_ACT_GEN_SET) {
+ memcpy(rekey->kek, data->kek, NL80211_KEK_LEN);
+ memcpy(rekey->kck, data->kck, NL80211_KCK_LEN);
+ rekey_ctr = be64_to_cpup((__be64 *)data->replay_ctr);
+ rekey->replay_ctr_low = cpu_to_le32((u32)rekey_ctr);
+ rekey->replay_ctr_high = cpu_to_le32((u32)(rekey_ctr >> 32));
+ }
+
+ return 0;
+}
+
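Editorial note: cfg80211 delivers the GTK replay counter as an 8-byte big-endian blob, while the firmware command wants it as two little-endian 32-bit words. Read in isolation, the conversion above is:

	u64 ctr = be64_to_cpup((__be64 *)data->replay_ctr);	/* wire order -> host */
	__le32 lo = cpu_to_le32((u32)ctr);			/* bits 31..0  */
	__le32 hi = cpu_to_le32((u32)(ctr >> 32));		/* bits 63..32 */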
static int
mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd,
@@ -2094,6 +2118,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_robust_coex(priv, cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
+ ret = mwifiex_cmd_gtk_rekey_offload(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
default:
mwifiex_dbg(priv->adapter, ERROR,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
@@ -2134,6 +2162,7 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
enum state_11d_t state_11d;
struct mwifiex_ds_11n_tx_cfg tx_cfg;
u8 sdio_sp_rx_aggr_enable;
+ u32 data;
if (first_sta) {
if (priv->adapter->iface_type == MWIFIEX_PCIE) {
@@ -2154,9 +2183,16 @@ int mwifiex_sta_init_cmd(struct mwifiex_private *priv, u8 first_sta, bool init)
* The cal-data can be read from device tree and/or
* a configuration file and downloaded to firmware.
*/
- adapter->dt_node =
- of_find_node_by_name(NULL, "marvell_cfgdata");
- if (adapter->dt_node) {
+ if (priv->adapter->iface_type == MWIFIEX_SDIO &&
+ adapter->dev->of_node) {
+ adapter->dt_node = adapter->dev->of_node;
+ if (of_property_read_u32(adapter->dt_node,
+ "marvell,wakeup-pin",
+ &data) == 0) {
+ pr_debug("Wakeup pin = 0x%x\n", data);
+ adapter->hs_cfg.gpio = data;
+ }
+
ret = mwifiex_dnld_dt_cfgdata(priv, adapter->dt_node,
"marvell,caldata");
if (ret)
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
index d96523e10..d18c7979d 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c
@@ -44,7 +44,6 @@ static void
mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
struct host_cmd_ds_command *resp)
{
- struct cmd_ctrl_node *cmd_node = NULL, *tmp_node;
struct mwifiex_adapter *adapter = priv->adapter;
struct host_cmd_ds_802_11_ps_mode_enh *pm;
unsigned long flags;
@@ -71,17 +70,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
break;
case HostCmd_CMD_802_11_SCAN:
case HostCmd_CMD_802_11_SCAN_EXT:
- /* Cancel all pending scan command */
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- list_for_each_entry_safe(cmd_node, tmp_node,
- &adapter->scan_pending_q, list) {
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- mwifiex_insert_cmd_to_free_q(adapter, cmd_node);
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- }
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ mwifiex_cancel_pending_scan_cmd(adapter);
spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
adapter->scan_processing = false;
@@ -1244,6 +1233,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
case HostCmd_CMD_ROBUST_COEX:
ret = mwifiex_ret_robust_coex(priv, resp, data_buf);
break;
+ case HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG:
+ break;
default:
mwifiex_dbg(adapter, ERROR,
"CMD_RESP: unknown cmd response %#x\n",
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_event.c b/drivers/net/wireless/marvell/mwifiex/sta_event.c
index 070bce401..0104108b4 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_event.c
@@ -147,6 +147,9 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
+
+ mwifiex_send_cmd(priv, HostCmd_CMD_GTK_REKEY_OFFLOAD_CFG,
+ HostCmd_ACT_GEN_REMOVE, 0, NULL, false);
}
static int mwifiex_parse_tdls_event(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
index d5c56eb9e..8e0862657 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_ioctl.c
@@ -146,6 +146,7 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
size_t beacon_ie_len;
struct mwifiex_bss_priv *bss_priv = (void *)bss->priv;
const struct cfg80211_bss_ies *ies;
+ int ret;
rcu_read_lock();
ies = rcu_dereference(bss->ies);
@@ -189,7 +190,48 @@ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_SPECTRUM_MGMT)
bss_desc->sensed_11h = true;
- return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
+ ret = mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc);
+ if (ret)
+ return ret;
+
+ /* Update HT40 capability based on current channel information */
+ if (bss_desc->bcn_ht_oper && bss_desc->bcn_ht_cap) {
+ u8 ht_param = bss_desc->bcn_ht_oper->ht_param;
+ u8 radio = mwifiex_band_to_radio_type(bss_desc->bss_band);
+ struct ieee80211_supported_band *sband =
+ priv->wdev.wiphy->bands[radio];
+ int freq = ieee80211_channel_to_frequency(bss_desc->channel,
+ radio);
+ struct ieee80211_channel *chan =
+ ieee80211_get_channel(priv->adapter->wiphy, freq);
+
+ switch (ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ if (chan->flags & IEEE80211_CHAN_NO_HT40PLUS) {
+ sband->ht_cap.cap &=
+ ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+ } else {
+ sband->ht_cap.cap |=
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_40;
+ }
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ if (chan->flags & IEEE80211_CHAN_NO_HT40MINUS) {
+ sband->ht_cap.cap &=
+ ~IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ sband->ht_cap.cap &= ~IEEE80211_HT_CAP_SGI_40;
+ } else {
+ sband->ht_cap.cap |=
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SGI_40;
+ }
+ break;
+ }
+ }
+
+ return 0;
}
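Editorial note: the two switch arms above differ only in which "no HT40 above/below" channel flag they test before toggling the same two capability bits. A condensed equivalent, illustrative only (the no-secondary-channel case must still fall through untouched):

	u32 sec = ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;

	if (sec == IEEE80211_HT_PARAM_CHA_SEC_ABOVE ||
	    sec == IEEE80211_HT_PARAM_CHA_SEC_BELOW) {
		u32 no_ht40 = (sec == IEEE80211_HT_PARAM_CHA_SEC_ABOVE) ?
			      IEEE80211_CHAN_NO_HT40PLUS :
			      IEEE80211_CHAN_NO_HT40MINUS;

		if (chan->flags & no_ht40)
			sband->ht_cap.cap &= ~(IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
					       IEEE80211_HT_CAP_SGI_40);
		else
			sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
					     IEEE80211_HT_CAP_SGI_40;
	}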
void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv)
@@ -509,7 +551,8 @@ int mwifiex_enable_hs(struct mwifiex_adapter *adapter)
if (priv && priv->sched_scanning) {
#ifdef CONFIG_PM
- if (!priv->wdev.wiphy->wowlan_config->nd_config) {
+ if (priv->wdev.wiphy->wowlan_config &&
+ !priv->wdev.wiphy->wowlan_config->nd_config) {
#endif
mwifiex_dbg(adapter, CMD, "aborting bgscan!\n");
mwifiex_stop_bg_scan(priv);
diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
index 150649602..df9704de0 100644
--- a/drivers/net/wireless/marvell/mwifiex/tdls.c
+++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
@@ -285,7 +285,7 @@ static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
else
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
- /* find the minmum bandwith between AP/TDLS peers */
+ /* find the minimum bandwidth between AP/TDLS peers */
vht_cap = &sta_ptr->tdls_cap.vhtcap;
supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
peer_supp_chwd_set =
diff --git a/drivers/net/wireless/marvell/mwifiex/txrx.c b/drivers/net/wireless/marvell/mwifiex/txrx.c
index bf6182b64..abdd0cf71 100644
--- a/drivers/net/wireless/marvell/mwifiex/txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/txrx.c
@@ -297,6 +297,13 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
goto done;
mwifiex_set_trans_start(priv->netdev);
+
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
+ atomic_dec_return(&adapter->pending_bridged_pkts);
+
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
+ goto done;
+
if (!status) {
priv->stats.tx_packets++;
priv->stats.tx_bytes += tx_info->pkt_len;
@@ -306,12 +313,6 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
priv->stats.tx_errors++;
}
- if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT)
- atomic_dec_return(&adapter->pending_bridged_pkts);
-
- if (tx_info->flags & MWIFIEX_BUF_FLAG_AGGR_PKT)
- goto done;
-
if (aggr)
/* For skb_aggr, do not wake up tx queue */
goto done;
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 16d95b22f..f79d00d1e 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -694,7 +694,7 @@ static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
struct mwifiex_ie_list *ap_ie = cmd_buf;
struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
- if (!ap_ie || !ap_ie->len || !ap_ie->ie_list)
+ if (!ap_ie || !ap_ie->len)
return -1;
*ie_size += le16_to_cpu(ap_ie->len) +
@@ -816,7 +816,7 @@ void mwifiex_uap_set_channel(struct mwifiex_private *priv,
chandef.chan->center_freq);
/* Set appropriate bands */
- if (chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (chandef.chan->band == NL80211_BAND_2GHZ) {
bss_cfg->band_cfg = BAND_CONFIG_BG;
config_bands = BAND_B | BAND_G;
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
index 52f7981a8..666e91af5 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_txrx.c
@@ -102,6 +102,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
int hdr_chop;
struct ethhdr *p_ethhdr;
struct mwifiex_sta_node *src_node;
+ int index;
uap_rx_pd = (struct uap_rxpd *)(skb->data);
rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
@@ -208,10 +209,15 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
}
__net_timestamp(skb);
+
+ index = mwifiex_1d_to_wmm_queue[skb->priority];
+ atomic_inc(&priv->wmm_tx_pending[index]);
mwifiex_wmm_add_buf_txqueue(priv, skb);
atomic_inc(&adapter->tx_pending);
atomic_inc(&adapter->pending_bridged_pkts);
+ mwifiex_queue_main_work(priv->adapter);
+
return;
}
@@ -263,6 +269,96 @@ int mwifiex_handle_uap_rx_forward(struct mwifiex_private *priv,
return mwifiex_process_rx_packet(priv, skb);
}
+int mwifiex_uap_recv_packet(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_sta_node *src_node;
+ struct ethhdr *p_ethhdr;
+ struct sk_buff *skb_uap;
+ struct mwifiex_txinfo *tx_info;
+
+ if (!skb)
+ return -1;
+
+ p_ethhdr = (void *)skb->data;
+ src_node = mwifiex_get_sta_entry(priv, p_ethhdr->h_source);
+ if (src_node) {
+ src_node->stats.last_rx = jiffies;
+ src_node->stats.rx_bytes += skb->len;
+ src_node->stats.rx_packets++;
+ }
+
+ skb->dev = priv->netdev;
+ skb->protocol = eth_type_trans(skb, priv->netdev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* This is required only for 11n on USB/PCIE, where we allocate a 4K
+ * buffer whenever 11n is enabled (to be able to receive 4K AMSDU
+ * packets). On SDIO the buffers are sized to the packet, so this is
+ * not needed there.
+ *
+ * The truesize is adjusted because each skb is allocated at 4K while
+ * the received packets are only ~2K. Without the adjustment, an
+ * application that sizes its receive buffer for the actual data (e.g.
+ * 64K for a 64K datagram arriving as ~1.5K IP fragments) is charged
+ * almost double, since every fragment is accounted at 4K. Once the
+ * 64K socket limit is hit, the kernel drops the remaining fragments.
+ * This currently makes the Filesndl-ht.scr UDP test fail, hence this
+ * fix.
+ */
+ if ((adapter->iface_type == MWIFIEX_USB ||
+ adapter->iface_type == MWIFIEX_PCIE) &&
+ (skb->truesize > MWIFIEX_RX_DATA_BUF_SIZE))
+ skb->truesize += (skb->len - MWIFIEX_RX_DATA_BUF_SIZE);
+
+ if (is_multicast_ether_addr(p_ethhdr->h_dest) ||
+ mwifiex_get_sta_entry(priv, p_ethhdr->h_dest)) {
+ if (skb_headroom(skb) < MWIFIEX_MIN_DATA_HEADER_LEN)
+ skb_uap =
+ skb_realloc_headroom(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+ else
+ skb_uap = skb_copy(skb, GFP_ATOMIC);
+
+ if (likely(skb_uap)) {
+ tx_info = MWIFIEX_SKB_TXCB(skb_uap);
+ memset(tx_info, 0, sizeof(*tx_info));
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
+ __net_timestamp(skb_uap);
+ mwifiex_wmm_add_buf_txqueue(priv, skb_uap);
+ atomic_inc(&adapter->tx_pending);
+ atomic_inc(&adapter->pending_bridged_pkts);
+ if ((atomic_read(&adapter->pending_bridged_pkts) >=
+ MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
+ mwifiex_dbg(adapter, ERROR,
+ "Tx: Bridge packet limit reached. Drop packet!\n");
+ mwifiex_uap_cleanup_tx_queues(priv);
+ }
+
+ } else {
+ mwifiex_dbg(adapter, ERROR, "failed to allocate skb_uap");
+ }
+
+ mwifiex_queue_main_work(adapter);
+ /* Don't forward intra-BSS unicast packets to the upper layer */
+ if (mwifiex_get_sta_entry(priv, p_ethhdr->h_dest))
+ return 0;
+ }
+
+ /* Forward multicast/broadcast packets to the upper layer */
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+
+ return 0;
+}
+
/*
* This function processes the packet received on AP interface.
*
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 3ae9be7e4..4c2328f75 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -995,7 +995,8 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
{
int ret = 0;
u8 *firmware = fw->fw_buf, *recv_buff;
- u32 retries = USB8XXX_FW_MAX_RETRY, dlen;
+ u32 retries = USB8XXX_FW_MAX_RETRY + 1;
+ u32 dlen;
u32 fw_seqnum = 0, tlen = 0, dnld_cmd = 0;
struct fw_data *fwdata;
struct fw_sync_header sync_fw;
@@ -1017,8 +1018,10 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
/* Allocate memory for receive */
recv_buff = kzalloc(FW_DNLD_RX_BUF_SIZE, GFP_KERNEL);
- if (!recv_buff)
+ if (!recv_buff) {
+ ret = -ENOMEM;
goto cleanup;
+ }
do {
/* Send pseudo data to check winner status first */
@@ -1041,7 +1044,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
}
/* If the send/receive fails or CRC occurs then retry */
- while (retries--) {
+ while (--retries) {
u8 *buf = (u8 *)fwdata;
u32 len = FW_DATA_XMIT_SIZE;
@@ -1101,7 +1104,7 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
continue;
}
- retries = USB8XXX_FW_MAX_RETRY;
+ retries = USB8XXX_FW_MAX_RETRY + 1;
break;
}
fw_seqnum++;
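Editorial note: the retry changes are an off-by-one fix. With retries = MAX and while (retries--), the body still runs MAX times, but on exhaustion the post-decrement leaves retries wrapped to UINT_MAX, so failure cannot be detected by testing retries afterwards. Initializing to MAX + 1 and pre-decrementing runs at most MAX attempts and leaves retries == 0 exactly when all of them failed. A minimal model of the loop:

	u32 retries = USB8XXX_FW_MAX_RETRY + 1;

	while (--retries) {
		if (send_fw_block() == 0)	/* hypothetical helper */
			break;			/* success: exit with retries > 0 */
	}
	if (!retries)
		return -EIO;			/* every attempt failed */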
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 427b961b7..4133136b8 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -346,20 +346,20 @@ struct mwl8k_sta {
#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
static const struct ieee80211_channel mwl8k_channels_24[] = {
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2412, .hw_value = 1, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2417, .hw_value = 2, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2422, .hw_value = 3, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2427, .hw_value = 4, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 5, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2437, .hw_value = 6, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2442, .hw_value = 7, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2447, .hw_value = 8, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2452, .hw_value = 9, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2457, .hw_value = 10, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2462, .hw_value = 11, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2467, .hw_value = 12, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2472, .hw_value = 13, },
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2484, .hw_value = 14, },
};
static const struct ieee80211_rate mwl8k_rates_24[] = {
@@ -379,10 +379,10 @@ static const struct ieee80211_rate mwl8k_rates_24[] = {
};
static const struct ieee80211_channel mwl8k_channels_50[] = {
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5180, .hw_value = 36, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5200, .hw_value = 40, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5220, .hw_value = 44, },
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 48, },
};
static const struct ieee80211_rate mwl8k_rates_50[] = {
@@ -1010,11 +1010,11 @@ mwl8k_rxd_ap_process(void *_rxd, struct ieee80211_rx_status *status,
}
if (rxd->channel > 14) {
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;
if (!(status->flag & RX_FLAG_HT))
status->rate_idx -= 5;
} else {
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
}
status->freq = ieee80211_channel_to_frequency(rxd->channel,
status->band);
@@ -1118,11 +1118,11 @@ mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
status->flag |= RX_FLAG_HT;
if (rxd->channel > 14) {
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;
if (!(status->flag & RX_FLAG_HT))
status->rate_idx -= 5;
} else {
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
}
status->freq = ieee80211_channel_to_frequency(rxd->channel,
status->band);
@@ -2300,13 +2300,13 @@ static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
- priv->band_24.band = IEEE80211_BAND_2GHZ;
+ priv->band_24.band = NL80211_BAND_2GHZ;
priv->band_24.channels = priv->channels_24;
priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
priv->band_24.bitrates = priv->rates_24;
priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band_24;
}
static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
@@ -2319,13 +2319,13 @@ static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
- priv->band_50.band = IEEE80211_BAND_5GHZ;
+ priv->band_50.band = NL80211_BAND_5GHZ;
priv->band_50.channels = priv->channels_50;
priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
priv->band_50.bitrates = priv->rates_50;
priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &priv->band_50;
}
/*
@@ -2876,9 +2876,9 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST);
- if (channel->band == IEEE80211_BAND_2GHZ)
+ if (channel->band == NL80211_BAND_2GHZ)
cmd->band = cpu_to_le16(0x1);
- else if (channel->band == IEEE80211_BAND_5GHZ)
+ else if (channel->band == NL80211_BAND_5GHZ)
cmd->band = cpu_to_le16(0x4);
cmd->channel = cpu_to_le16(channel->hw_value);
@@ -3067,7 +3067,7 @@ static int freq_to_idx(struct mwl8k_priv *priv, int freq)
struct ieee80211_supported_band *sband;
int band, ch, idx = 0;
- for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
sband = priv->hw->wiphy->bands[band];
if (!sband)
continue;
@@ -3149,9 +3149,9 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->current_channel = channel->hw_value;
- if (channel->band == IEEE80211_BAND_2GHZ)
+ if (channel->band == NL80211_BAND_2GHZ)
cmd->channel_flags |= cpu_to_le32(0x00000001);
- else if (channel->band == IEEE80211_BAND_5GHZ)
+ else if (channel->band == NL80211_BAND_5GHZ)
cmd->channel_flags |= cpu_to_le32(0x00000004);
if (!priv->sw_scan_start) {
@@ -4094,10 +4094,10 @@ static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
cmd->stn_id = cpu_to_le16(sta->aid);
cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
- if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
- rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
+ rates = sta->supp_rates[NL80211_BAND_2GHZ];
else
- rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
cmd->legacy_rates = cpu_to_le32(rates);
if (sta->ht_cap.ht_supported) {
cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
@@ -4529,10 +4529,10 @@ static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
p->ht_caps = cpu_to_le16(sta->ht_cap.cap);
p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
((sta->ht_cap.ampdu_density & 7) << 2);
- if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
- rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
+ rates = sta->supp_rates[NL80211_BAND_2GHZ];
else
- rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ rates = sta->supp_rates[NL80211_BAND_5GHZ] << 5;
legacy_rate_mask_to_array(p->legacy_rates, rates);
memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
p->interop = 1;
@@ -5010,11 +5010,11 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
goto out;
}
- if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) {
- ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
+ if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ) {
+ ap_legacy_rates = ap->supp_rates[NL80211_BAND_2GHZ];
} else {
ap_legacy_rates =
- ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ ap->supp_rates[NL80211_BAND_5GHZ] << 5;
}
memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
@@ -5042,7 +5042,7 @@ mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
idx--;
if (hw->conf.chandef.chan->band ==
- IEEE80211_BAND_2GHZ)
+ NL80211_BAND_2GHZ)
rate = mwl8k_rates_24[idx].hw_value;
else
rate = mwl8k_rates_50[idx].hw_value;
@@ -5116,7 +5116,7 @@ mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (idx)
idx--;
- if (hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ)
+ if (hw->conf.chandef.chan->band == NL80211_BAND_2GHZ)
rate = mwl8k_rates_24[idx].hw_value;
else
rate = mwl8k_rates_50[idx].hw_value;
@@ -5388,7 +5388,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
struct ieee80211_supported_band *sband;
if (priv->ap_fw) {
- sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
@@ -5396,7 +5396,7 @@ static int mwl8k_get_survey(struct ieee80211_hw *hw, int idx,
}
if (!sband)
- sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
if (!sband || idx >= sband->n_channels)
return -ENOENT;
diff --git a/drivers/net/wireless/mediatek/mt7601u/init.c b/drivers/net/wireless/mediatek/mt7601u/init.c
index 26190fd33..8fa78d715 100644
--- a/drivers/net/wireless/mediatek/mt7601u/init.c
+++ b/drivers/net/wireless/mediatek/mt7601u/init.c
@@ -469,7 +469,7 @@ struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev)
}
#define CHAN2G(_idx, _freq) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 30, \
@@ -563,7 +563,7 @@ mt76_init_sband_2g(struct mt7601u_dev *dev)
{
dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g),
GFP_KERNEL);
- dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = dev->sband_2g;
+ dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g;
WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num >
ARRAY_SIZE(mt76_channels_2ghz));
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index 7fa0128de..bf3f0a399 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -777,7 +777,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
u8 offset1;
u8 offset2;
- if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
@@ -1174,7 +1174,7 @@ static void rt2800_brightness_set(struct led_classdev *led_cdev,
container_of(led_cdev, struct rt2x00_led, led_dev);
unsigned int enabled = brightness != LED_OFF;
unsigned int bg_mode =
- (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+ (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
unsigned int polarity =
rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
EEPROM_FREQ_LED_POLARITY);
@@ -1741,7 +1741,7 @@ static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
u8 led_ctrl, led_g_mode, led_r_mode;
rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1);
rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1);
} else {
@@ -1844,7 +1844,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2x00_has_cap_bt_coexist(rt2x00dev)) {
rt2x00_set_field8(&r3, BBP3_RX_ADC, 1);
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA,
- rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+ rt2x00dev->curr_band == NL80211_BAND_5GHZ);
rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B);
} else {
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
@@ -3451,7 +3451,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
* Matching Delta value -4 -3 -2 -1 0 +1 +2 +3 +4
* Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
*/
- if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
tssi_bounds[0] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG1_MINUS4);
@@ -3546,7 +3546,7 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
}
static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
u16 eeprom;
u8 comp_en;
@@ -3562,7 +3562,7 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
return 0;
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
comp_en = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_DELTA_ENABLE_2G);
if (comp_en) {
@@ -3611,7 +3611,7 @@ static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
}
static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
- enum ieee80211_band band, int power_level,
+ enum nl80211_band band, int power_level,
u8 txpower, int delta)
{
u16 eeprom;
@@ -3639,7 +3639,7 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
&eeprom);
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
eirp_txpower_criterion = rt2x00_get_field16(eeprom,
EEPROM_EIRP_MAX_TX_POWER_2GHZ);
else
@@ -3686,7 +3686,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
u16 eeprom;
u32 regs[TX_PWR_CFG_IDX_COUNT];
unsigned int offset;
- enum ieee80211_band band = chan->band;
+ enum nl80211_band band = chan->band;
int delta;
int i;
@@ -3697,7 +3697,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
/* calculate temperature compensation delta */
delta = rt2800_get_gain_calibration_delta(rt2x00dev);
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
offset = 16;
else
offset = 0;
@@ -4055,7 +4055,7 @@ static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++)
rt2x00_dbg(rt2x00dev,
"band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
- (band == IEEE80211_BAND_5GHZ) ? '5' : '2',
+ (band == NL80211_BAND_5GHZ) ? '5' : '2',
(test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
'4' : '2',
(i > TX_PWR_CFG_9_IDX) ?
@@ -4081,7 +4081,7 @@ static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
u16 eeprom;
u32 reg, offset;
int i, is_rate_b, delta, power_ctrl;
- enum ieee80211_band band = chan->band;
+ enum nl80211_band band = chan->band;
/*
* Calculate HT40 compensation. For 40MHz we need to add or subtract
@@ -4436,7 +4436,7 @@ static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
u8 vgc;
- if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3071) ||
rt2x00_rt(rt2x00dev, RT3090) ||
@@ -4511,7 +4511,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
case RT3572:
case RT3593:
if (qual->rssi > -65) {
- if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ)
+ if (rt2x00dev->curr_band == NL80211_BAND_2GHZ)
vgc += 0x20;
else
vgc += 0x10;
@@ -7492,6 +7492,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
if (!rt2x00_is_usb(rt2x00dev))
ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING);
+ /* Set MFP if HW crypto is disabled. */
+ if (rt2800_hwcrypt_disabled(rt2x00dev))
+ ieee80211_hw_set(rt2x00dev->hw, MFP_CAPABLE);
+
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
rt2800_eeprom_addr(rt2x00dev,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 6418620f9..f68d49212 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -38,6 +38,7 @@
#include <linux/kfifo.h>
#include <linux/hrtimer.h>
#include <linux/average.h>
+#include <linux/usb.h>
#include <net/mac80211.h>
@@ -752,8 +753,8 @@ struct rt2x00_dev {
* IEEE80211 control structure.
*/
struct ieee80211_hw *hw;
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
- enum ieee80211_band curr_band;
+ struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
+ enum nl80211_band curr_band;
int curr_freq;
/*
@@ -1002,6 +1003,8 @@ struct rt2x00_dev {
/* Extra TX headroom required for alignment purposes. */
unsigned int extra_tx_headroom;
+
+ struct usb_anchor *anchor;
};
struct rt2x00_bar_list_entry {
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
index 5639ed816..4e0c56530 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
@@ -911,7 +911,7 @@ static void rt2x00lib_channel(struct ieee80211_channel *entry,
const int value)
{
/* XXX: this assumption about the band is wrong for 802.11j */
- entry->band = channel <= 14 ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ entry->band = channel <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
entry->center_freq = ieee80211_channel_to_frequency(channel,
entry->band);
entry->hw_value = value;
@@ -975,13 +975,13 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
* Channels: 2.4 GHz
*/
if (spec->supported_bands & SUPPORT_BAND_2GHZ) {
- rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_channels = 14;
- rt2x00dev->bands[IEEE80211_BAND_2GHZ].n_bitrates = num_rates;
- rt2x00dev->bands[IEEE80211_BAND_2GHZ].channels = channels;
- rt2x00dev->bands[IEEE80211_BAND_2GHZ].bitrates = rates;
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &rt2x00dev->bands[IEEE80211_BAND_2GHZ];
- memcpy(&rt2x00dev->bands[IEEE80211_BAND_2GHZ].ht_cap,
+ rt2x00dev->bands[NL80211_BAND_2GHZ].n_channels = 14;
+ rt2x00dev->bands[NL80211_BAND_2GHZ].n_bitrates = num_rates;
+ rt2x00dev->bands[NL80211_BAND_2GHZ].channels = channels;
+ rt2x00dev->bands[NL80211_BAND_2GHZ].bitrates = rates;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &rt2x00dev->bands[NL80211_BAND_2GHZ];
+ memcpy(&rt2x00dev->bands[NL80211_BAND_2GHZ].ht_cap,
&spec->ht, sizeof(spec->ht));
}
@@ -991,15 +991,15 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
* Channels: OFDM, UNII, HiperLAN2.
*/
if (spec->supported_bands & SUPPORT_BAND_5GHZ) {
- rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_channels =
+ rt2x00dev->bands[NL80211_BAND_5GHZ].n_channels =
spec->num_channels - 14;
- rt2x00dev->bands[IEEE80211_BAND_5GHZ].n_bitrates =
+ rt2x00dev->bands[NL80211_BAND_5GHZ].n_bitrates =
num_rates - 4;
- rt2x00dev->bands[IEEE80211_BAND_5GHZ].channels = &channels[14];
- rt2x00dev->bands[IEEE80211_BAND_5GHZ].bitrates = &rates[4];
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &rt2x00dev->bands[IEEE80211_BAND_5GHZ];
- memcpy(&rt2x00dev->bands[IEEE80211_BAND_5GHZ].ht_cap,
+ rt2x00dev->bands[NL80211_BAND_5GHZ].channels = &channels[14];
+ rt2x00dev->bands[NL80211_BAND_5GHZ].bitrates = &rates[4];
+ hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &rt2x00dev->bands[NL80211_BAND_5GHZ];
+ memcpy(&rt2x00dev->bands[NL80211_BAND_5GHZ].ht_cap,
&spec->ht, sizeof(spec->ht));
}
@@ -1016,11 +1016,11 @@ static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev)
if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags))
ieee80211_unregister_hw(rt2x00dev->hw);
- if (likely(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ])) {
- kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->channels);
- kfree(rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ]->bitrates);
- rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
- rt2x00dev->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+ if (likely(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ])) {
+ kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels);
+ kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates);
+ rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+ rt2x00dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
}
kfree(rt2x00dev->spec.channels_info);
@@ -1422,11 +1422,14 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->intf_work);
cancel_delayed_work_sync(&rt2x00dev->autowakeup_work);
cancel_work_sync(&rt2x00dev->sleep_work);
+#ifdef CONFIG_RT2X00_LIB_USB
if (rt2x00_is_usb(rt2x00dev)) {
+ usb_kill_anchored_urbs(rt2x00dev->anchor);
hrtimer_cancel(&rt2x00dev->txstatus_timer);
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
}
+#endif
if (rt2x00dev->workqueue)
destroy_workqueue(rt2x00dev->workqueue);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
index 7627af609..7cf26c612 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
@@ -171,8 +171,11 @@ static void rt2x00usb_register_read_async_cb(struct urb *urb)
{
struct rt2x00_async_read_data *rd = urb->context;
if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) {
- if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+ usb_anchor_urb(urb, rd->rt2x00dev->anchor);
+ if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
+ usb_unanchor_urb(urb);
kfree(rd);
+ }
} else
kfree(rd);
}
@@ -206,8 +209,11 @@ void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev,
usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0),
(unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg),
rt2x00usb_register_read_async_cb, rd);
- if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
+ usb_anchor_urb(urb, rt2x00dev->anchor);
+ if (usb_submit_urb(urb, GFP_ATOMIC) < 0) {
+ usb_unanchor_urb(urb);
kfree(rd);
+ }
usb_free_urb(urb);
}
EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async);
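Editorial note: the pattern added throughout this file anchors each URB before submission so that teardown can cancel everything still in flight with a single call; a failed submit immediately unanchors again so the URB is never left tracked. The idiom in isolation:

	usb_anchor_urb(urb, rt2x00dev->anchor);		/* track before submit */
	if (usb_submit_urb(urb, GFP_ATOMIC) < 0)
		usb_unanchor_urb(urb);			/* submit failed: untrack */

	/* ... and on device removal (rt2x00dev.c hunk above): */
	usb_kill_anchored_urbs(rt2x00dev->anchor);	/* cancel all in-flight URBs */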
@@ -313,8 +319,10 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
entry->skb->data, length,
rt2x00usb_interrupt_txdone, entry);
+ usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
+ usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -402,8 +410,10 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
entry->skb->data, entry->skb->len,
rt2x00usb_interrupt_rxdone, entry);
+ usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
if (status) {
+ usb_unanchor_urb(entry_priv->urb);
if (status == -ENODEV)
clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
@@ -818,6 +828,13 @@ int rt2x00usb_probe(struct usb_interface *usb_intf,
if (retval)
goto exit_free_reg;
+ rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev,
+ sizeof(struct usb_anchor),
+ GFP_KERNEL);
+ if (!rt2x00dev->anchor)
+ goto exit_free_reg;
+
+ init_usb_anchor(rt2x00dev->anchor);
return 0;
exit_free_reg:
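
The anchor introduced above is what makes the usb_kill_anchored_urbs() call in rt2x00lib_remove_dev() work: every submission path first attaches the URB to the device-wide anchor and detaches it again if submission fails, so teardown can cancel everything still in flight with a single call. A minimal sketch of the lifecycle, assuming a kernel build context:

    #include <linux/usb.h>

    /* one anchor per device, set up once with init_usb_anchor() */
    static int submit_anchored(struct usb_anchor *anchor, struct urb *urb)
    {
            int ret;

            usb_anchor_urb(urb, anchor);    /* track before submission */
            ret = usb_submit_urb(urb, GFP_ATOMIC);
            if (ret)
                    usb_unanchor_urb(urb);  /* drop tracking on failure */
            return ret;
    }

    static void cancel_all(struct usb_anchor *anchor)
    {
            usb_kill_anchored_urbs(anchor); /* as done at device removal */
    }

One caveat in the probe hunk: the allocation-failure branch jumps to exit_free_reg without assigning retval, which at that point is still 0 from the preceding check, so if the exit_free_reg path returns retval (as the surrounding pattern suggests) a failed devm_kmalloc() would be reported as success; setting retval to -ENOMEM before the goto would be needed to propagate the error.
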
diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
index 1d07ff74d..1c4226701 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c
@@ -252,9 +252,9 @@ static void rt61pci_brightness_set(struct led_classdev *led_cdev,
container_of(led_cdev, struct rt2x00_led, led_dev);
unsigned int enabled = brightness != LED_OFF;
unsigned int a_mode =
- (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+ (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ);
unsigned int bg_mode =
- (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+ (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
if (led->type == LED_TYPE_RADIO) {
rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
@@ -643,12 +643,12 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
case ANTENNA_HW_DIVERSITY:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
- (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ));
+ (rt2x00dev->curr_band != NL80211_BAND_5GHZ));
break;
case ANTENNA_A:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
else
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
@@ -657,7 +657,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
default:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
else
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -808,7 +808,7 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
ant->tx == ANTENNA_SW_DIVERSITY);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
sel = antenna_sel_a;
lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
@@ -822,9 +822,9 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00mmio_register_read(rt2x00dev, PHY_CSR0, &reg);
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
- rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+ rt2x00dev->curr_band == NL80211_BAND_2GHZ);
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
- rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+ rt2x00dev->curr_band == NL80211_BAND_5GHZ);
rt2x00mmio_register_write(rt2x00dev, PHY_CSR0, reg);
@@ -846,7 +846,7 @@ static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev,
u16 eeprom;
short lna_gain = 0;
- if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) {
if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
@@ -1048,7 +1048,7 @@ static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev,
/*
* Determine r17 bounds.
*/
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
low_bound = 0x28;
up_bound = 0x48;
if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
@@ -2077,7 +2077,7 @@ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
return 0;
}
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
if (lna == 3 || lna == 2)
offset += 10;
}
diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
index 61637defa..903cc6f67 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c
@@ -197,9 +197,9 @@ static void rt73usb_brightness_set(struct led_classdev *led_cdev,
container_of(led_cdev, struct rt2x00_led, led_dev);
unsigned int enabled = brightness != LED_OFF;
unsigned int a_mode =
- (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_5GHZ);
+ (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ);
unsigned int bg_mode =
- (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
+ (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ);
if (led->type == LED_TYPE_RADIO) {
rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg,
@@ -593,13 +593,13 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
case ANTENNA_HW_DIVERSITY:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2);
temp = !rt2x00_has_cap_frame_type(rt2x00dev) &&
- (rt2x00dev->curr_band != IEEE80211_BAND_5GHZ);
+ (rt2x00dev->curr_band != NL80211_BAND_5GHZ);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp);
break;
case ANTENNA_A:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
else
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
@@ -608,7 +608,7 @@ static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
default:
rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1);
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ)
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ)
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3);
else
rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0);
@@ -704,7 +704,7 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY ||
ant->tx == ANTENNA_SW_DIVERSITY);
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
sel = antenna_sel_a;
lna = rt2x00_has_cap_external_lna_a(rt2x00dev);
} else {
@@ -718,9 +718,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00usb_register_read(rt2x00dev, PHY_CSR0, &reg);
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG,
- (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ));
+ (rt2x00dev->curr_band == NL80211_BAND_2GHZ));
rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A,
- (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ));
+ (rt2x00dev->curr_band == NL80211_BAND_5GHZ));
rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
@@ -736,7 +736,7 @@ static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
u16 eeprom;
short lna_gain = 0;
- if (libconf->conf->chandef.chan->band == IEEE80211_BAND_2GHZ) {
+ if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) {
if (rt2x00_has_cap_external_lna_bg(rt2x00dev))
lna_gain += 14;
@@ -923,7 +923,7 @@ static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev,
/*
* Determine r17 bounds.
*/
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
low_bound = 0x28;
up_bound = 0x48;
@@ -1657,7 +1657,7 @@ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1)
return 0;
}
- if (rt2x00dev->curr_band == IEEE80211_BAND_5GHZ) {
+ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) {
if (rt2x00_has_cap_external_lna_a(rt2x00dev)) {
if (lna == 3 || lna == 2)
offset += 10;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index a43a16fde..e895a8448 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -526,7 +526,7 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
* ieee80211_generic_frame_duration
*/
duration = ieee80211_generic_frame_duration(dev, priv->vif,
- IEEE80211_BAND_2GHZ, skb->len,
+ NL80211_BAND_2GHZ, skb->len,
ieee80211_get_tx_rate(dev, info));
frame_duration = priv->ack_time + le16_to_cpu(duration);
@@ -1018,6 +1018,8 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
dma_addr_t *mapping;
entry = priv->rx_ring + priv->rx_ring_sz*i;
if (!skb) {
+ pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
+ priv->rx_ring, priv->rx_ring_dma);
wiphy_err(dev->wiphy, "Cannot allocate RX skb\n");
return -ENOMEM;
}
@@ -1028,6 +1030,8 @@ static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
if (pci_dma_mapping_error(priv->pdev, *mapping)) {
kfree_skb(skb);
+ pci_free_consistent(priv->pdev, priv->rx_ring_sz * 32,
+ priv->rx_ring, priv->rx_ring_dma);
wiphy_err(dev->wiphy, "Cannot map DMA for RX skb\n");
return -ENOMEM;
}
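
Both failure branches above now release the coherent ring buffer before returning, closing a leak: rtl8180_init_rx_ring() allocates the 32-entry RX descriptor ring itself, and on the old error paths nothing else would have freed it. A sketch of the intended unwind shape, with assumed allocation context and illustrative names:

    /* assumed context: the same function allocated the ring earlier */
    static int init_rx_ring_sketch(struct rtl8180_priv *priv)
    {
            int i;

            priv->rx_ring = pci_alloc_consistent(priv->pdev,
                                                 priv->rx_ring_sz * 32,
                                                 &priv->rx_ring_dma);
            if (!priv->rx_ring)
                    return -ENOMEM;

            for (i = 0; i < 32; i++) {
                    struct sk_buff *skb = dev_alloc_skb(MAX_RX_SIZE);

                    if (!skb) {
                            /* undo our own allocation before bailing out */
                            pci_free_consistent(priv->pdev,
                                                priv->rx_ring_sz * 32,
                                                priv->rx_ring,
                                                priv->rx_ring_dma);
                            return -ENOMEM;
                    }
                    /* ... map skb, fill descriptor ... */
            }
            return 0;
    }
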
@@ -1529,7 +1533,7 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
priv->ack_time =
le16_to_cpu(ieee80211_generic_frame_duration(dev,
priv->vif,
- IEEE80211_BAND_2GHZ, 10,
+ NL80211_BAND_2GHZ, 10,
&priv->rates[0])) - 10;
rtl8180_conf_erp(dev, info);
@@ -1736,7 +1740,7 @@ static int rtl8180_probe(struct pci_dev *pdev,
if (err) {
printk(KERN_ERR "%s (rtl8180): Cannot obtain PCI resources\n",
pci_name(pdev));
- return err;
+ goto err_disable_dev;
}
io_addr = pci_resource_start(pdev, 0);
@@ -1795,12 +1799,12 @@ static int rtl8180_probe(struct pci_dev *pdev,
memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels));
memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
- priv->band.band = IEEE80211_BAND_2GHZ;
+ priv->band.band = NL80211_BAND_2GHZ;
priv->band.channels = priv->channels;
priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = 4;
- dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
ieee80211_hw_set(dev, HOST_BROADCAST_PS_BUFFERING);
ieee80211_hw_set(dev, RX_INCLUDES_FCS);
@@ -1938,6 +1942,8 @@ static int rtl8180_probe(struct pci_dev *pdev,
err_free_reg:
pci_release_regions(pdev);
+
+ err_disable_dev:
pci_disable_device(pdev);
return err;
}
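
The probe fix above converts an early return into a goto so that pci_disable_device() also runs when pci_request_regions() fails; the new err_disable_dev label sits at the bottom of the existing unwind ladder. The general shape, as a sketch:

    static int probe_sketch(struct pci_dev *pdev)
    {
            int err;

            err = pci_enable_device(pdev);
            if (err)
                    return err;             /* nothing to undo yet */

            err = pci_request_regions(pdev, "example");
            if (err)
                    goto err_disable_dev;   /* was: return err, which left
                                             * the device enabled */

            /* ... more setup, each step adding its own unwind label ... */
            return 0;

    err_disable_dev:
            pci_disable_device(pdev);
            return err;
    }
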
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index b7f72f9c7..231f84db9 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1470,12 +1470,12 @@ static int rtl8187_probe(struct usb_interface *intf,
memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
priv->map = (struct rtl818x_csr *)0xFF00;
- priv->band.band = IEEE80211_BAND_2GHZ;
+ priv->band.band = NL80211_BAND_2GHZ;
priv->band.channels = priv->channels;
priv->band.n_channels = ARRAY_SIZE(rtl818x_channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = ARRAY_SIZE(rtl818x_rates);
- dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
ieee80211_hw_set(dev, RX_INCLUDES_FCS);
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
index a6ad79f61..324451df9 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h
@@ -160,104 +160,40 @@ struct rtl8187_priv {
void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
-static inline u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv,
- u8 *addr, u8 idx)
-{
- u8 val;
-
- mutex_lock(&priv->io_mutex);
- usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
- RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits8, sizeof(val), HZ / 2);
-
- val = priv->io_dmabuf->bits8;
- mutex_unlock(&priv->io_mutex);
-
- return val;
-}
+u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv,
+ u8 *addr, u8 idx);
static inline u8 rtl818x_ioread8(struct rtl8187_priv *priv, u8 *addr)
{
return rtl818x_ioread8_idx(priv, addr, 0);
}
-static inline u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv,
- __le16 *addr, u8 idx)
-{
- __le16 val;
-
- mutex_lock(&priv->io_mutex);
- usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
- RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits16, sizeof(val), HZ / 2);
-
- val = priv->io_dmabuf->bits16;
- mutex_unlock(&priv->io_mutex);
-
- return le16_to_cpu(val);
-}
+u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv,
+ __le16 *addr, u8 idx);
static inline u16 rtl818x_ioread16(struct rtl8187_priv *priv, __le16 *addr)
{
return rtl818x_ioread16_idx(priv, addr, 0);
}
-static inline u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv,
- __le32 *addr, u8 idx)
-{
- __le32 val;
-
- mutex_lock(&priv->io_mutex);
- usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
- RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits32, sizeof(val), HZ / 2);
-
- val = priv->io_dmabuf->bits32;
- mutex_unlock(&priv->io_mutex);
-
- return le32_to_cpu(val);
-}
+u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv,
+ __le32 *addr, u8 idx);
static inline u32 rtl818x_ioread32(struct rtl8187_priv *priv, __le32 *addr)
{
return rtl818x_ioread32_idx(priv, addr, 0);
}
-static inline void rtl818x_iowrite8_idx(struct rtl8187_priv *priv,
- u8 *addr, u8 val, u8 idx)
-{
- mutex_lock(&priv->io_mutex);
-
- priv->io_dmabuf->bits8 = val;
- usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
- RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits8, sizeof(val), HZ / 2);
-
- mutex_unlock(&priv->io_mutex);
-}
+void rtl818x_iowrite8_idx(struct rtl8187_priv *priv,
+ u8 *addr, u8 val, u8 idx);
static inline void rtl818x_iowrite8(struct rtl8187_priv *priv, u8 *addr, u8 val)
{
rtl818x_iowrite8_idx(priv, addr, val, 0);
}
-static inline void rtl818x_iowrite16_idx(struct rtl8187_priv *priv,
- __le16 *addr, u16 val, u8 idx)
-{
- mutex_lock(&priv->io_mutex);
-
- priv->io_dmabuf->bits16 = cpu_to_le16(val);
- usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
- RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits16, sizeof(val), HZ / 2);
-
- mutex_unlock(&priv->io_mutex);
-}
+void rtl818x_iowrite16_idx(struct rtl8187_priv *priv,
+ __le16 *addr, u16 val, u8 idx);
static inline void rtl818x_iowrite16(struct rtl8187_priv *priv, __le16 *addr,
u16 val)
@@ -265,19 +201,8 @@ static inline void rtl818x_iowrite16(struct rtl8187_priv *priv, __le16 *addr,
rtl818x_iowrite16_idx(priv, addr, val, 0);
}
-static inline void rtl818x_iowrite32_idx(struct rtl8187_priv *priv,
- __le32 *addr, u32 val, u8 idx)
-{
- mutex_lock(&priv->io_mutex);
-
- priv->io_dmabuf->bits32 = cpu_to_le32(val);
- usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
- RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
- (unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits32, sizeof(val), HZ / 2);
-
- mutex_unlock(&priv->io_mutex);
-}
+void rtl818x_iowrite32_idx(struct rtl8187_priv *priv,
+ __le32 *addr, u32 val, u8 idx);
static inline void rtl818x_iowrite32(struct rtl8187_priv *priv, __le32 *addr,
u32 val)
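
The header changes above demote the six register accessors from static inlines to plain declarations; their bodies move verbatim into rtl8225.c below. Each accessor expands to a mutex lock/unlock pair around a usb_control_msg() call, so inlining duplicated a non-trivial sequence at every call site while buying essentially nothing, since the USB transfer dominates the cost; only the trivial idx-0 wrappers stay inline.
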
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
index 5ecf18ed6..e6668ffb7 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
@@ -22,6 +22,99 @@
#include "rtl8187.h"
#include "rtl8225.h"
+u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv,
+ u8 *addr, u8 idx)
+{
+ u8 val;
+
+ mutex_lock(&priv->io_mutex);
+ usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
+ RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits8, sizeof(val), HZ / 2);
+
+ val = priv->io_dmabuf->bits8;
+ mutex_unlock(&priv->io_mutex);
+
+ return val;
+}
+
+u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv,
+ __le16 *addr, u8 idx)
+{
+ __le16 val;
+
+ mutex_lock(&priv->io_mutex);
+ usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
+ RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits16, sizeof(val), HZ / 2);
+
+ val = priv->io_dmabuf->bits16;
+ mutex_unlock(&priv->io_mutex);
+
+ return le16_to_cpu(val);
+}
+
+u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv,
+ __le32 *addr, u8 idx)
+{
+ __le32 val;
+
+ mutex_lock(&priv->io_mutex);
+ usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
+ RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits32, sizeof(val), HZ / 2);
+
+ val = priv->io_dmabuf->bits32;
+ mutex_unlock(&priv->io_mutex);
+
+ return le32_to_cpu(val);
+}
+
+void rtl818x_iowrite8_idx(struct rtl8187_priv *priv,
+ u8 *addr, u8 val, u8 idx)
+{
+ mutex_lock(&priv->io_mutex);
+
+ priv->io_dmabuf->bits8 = val;
+ usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+ RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits8, sizeof(val), HZ / 2);
+
+ mutex_unlock(&priv->io_mutex);
+}
+
+void rtl818x_iowrite16_idx(struct rtl8187_priv *priv,
+ __le16 *addr, u16 val, u8 idx)
+{
+ mutex_lock(&priv->io_mutex);
+
+ priv->io_dmabuf->bits16 = cpu_to_le16(val);
+ usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+ RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits16, sizeof(val), HZ / 2);
+
+ mutex_unlock(&priv->io_mutex);
+}
+
+void rtl818x_iowrite32_idx(struct rtl8187_priv *priv,
+ __le32 *addr, u32 val, u8 idx)
+{
+ mutex_lock(&priv->io_mutex);
+
+ priv->io_dmabuf->bits32 = cpu_to_le32(val);
+ usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
+ RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
+ (unsigned long)addr, idx & 0x03,
+ &priv->io_dmabuf->bits32, sizeof(val), HZ / 2);
+
+ mutex_unlock(&priv->io_mutex);
+}
+
static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data)
{
struct rtl8187_priv *priv = dev->priv;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
index 5dea3bb93..1cf951eb0 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Makefile
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
@@ -1 +1,4 @@
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o
+
+rtl8xxxu-y := rtl8xxxu_core.o rtl8xxxu_8192e.o rtl8xxxu_8723b.o \
+ rtl8xxxu_8723a.o rtl8xxxu_8192c.o
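
In kbuild terms, rtl8xxxu.o is now a composite object: the rtl8xxxu-y list names the objects linked together into it, which is what lets the driver split into a core plus the per-chip files added below without touching the Kconfig symbol.
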
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index 7b73654e1..870c9cd5c 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -42,12 +42,18 @@
#define REALTEK_USB_CMD_IDX 0x00
#define TX_TOTAL_PAGE_NUM 0xf8
+#define TX_TOTAL_PAGE_NUM_8192E 0xf3
/* (HPQ + LPQ + NPQ + PUBQ) = TX_TOTAL_PAGE_NUM */
#define TX_PAGE_NUM_PUBQ 0xe7
#define TX_PAGE_NUM_HI_PQ 0x0c
#define TX_PAGE_NUM_LO_PQ 0x02
#define TX_PAGE_NUM_NORM_PQ 0x02
+#define TX_PAGE_NUM_PUBQ_8192E 0xe7
+#define TX_PAGE_NUM_HI_PQ_8192E 0x08
+#define TX_PAGE_NUM_LO_PQ_8192E 0x0c
+#define TX_PAGE_NUM_NORM_PQ_8192E 0x00
+
#define RTL_FW_PAGE_SIZE 4096
#define RTL8XXXU_FIRMWARE_POLL_MAX 1000
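
As a quick consistency check on the comment above: 0xe7 + 0x0c + 0x02 + 0x02 = 0xf7, one page short of TX_TOTAL_PAGE_NUM (0xf8); the remaining page is presumably reserved as a boundary page rather than assigned to a queue. The new _8192E constants give that chip its own page split and total; note that its normal-priority queue gets no pages at all.
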
@@ -65,13 +71,37 @@
#define EFUSE_BT_MAP_LEN_8723A 1024
#define EFUSE_MAX_WORD_UNIT 4
+enum rtl8xxxu_rtl_chip {
+ RTL8192S = 0x81920,
+ RTL8191S = 0x81910,
+ RTL8192C = 0x8192c,
+ RTL8191C = 0x8191c,
+ RTL8188C = 0x8188c,
+ RTL8188R = 0x81889,
+ RTL8192D = 0x8192d,
+ RTL8723A = 0x8723a,
+ RTL8188E = 0x8188e,
+ RTL8812 = 0x88120,
+ RTL8821 = 0x88210,
+ RTL8192E = 0x8192e,
+ RTL8191E = 0x8191e,
+ RTL8723B = 0x8723b,
+ RTL8814A = 0x8814a,
+ RTL8881A = 0x8881a,
+ RTL8821B = 0x8821b,
+ RTL8822B = 0x8822b,
+ RTL8703B = 0x8703b,
+ RTL8195A = 0x8195a,
+ RTL8188F = 0x8188f
+};
+
enum rtl8xxxu_rx_type {
RX_TYPE_DATA_PKT = 0,
RX_TYPE_C2H = 1,
RX_TYPE_ERROR = -1
};
-struct rtl8xxxu_rx_desc {
+struct rtl8xxxu_rxdesc16 {
#ifdef __LITTLE_ENDIAN
u32 pktlen:14;
u32 crc32:1;
@@ -207,7 +237,7 @@ struct rtl8xxxu_rx_desc {
#endif
};
-struct rtl8723bu_rx_desc {
+struct rtl8xxxu_rxdesc24 {
#ifdef __LITTLE_ENDIAN
u32 pktlen:14;
u32 crc32:1;
@@ -332,7 +362,7 @@ struct rtl8723bu_rx_desc {
__le32 tsfl;
};
-struct rtl8723au_tx_desc {
+struct rtl8xxxu_txdesc32 {
__le16 pkt_size;
u8 pkt_offset;
u8 txdw0;
@@ -346,7 +376,7 @@ struct rtl8723au_tx_desc {
__le16 txdw7;
};
-struct rtl8723bu_tx_desc {
+struct rtl8xxxu_txdesc40 {
__le16 pkt_size;
u8 pkt_offset;
u8 txdw0;
@@ -422,10 +452,10 @@ struct rtl8723bu_tx_desc {
* aggregation enable and break respectively. For 8723bu, bits 0-7 are macid.
*/
#define TXDESC_PKT_OFFSET_SZ 0
-#define TXDESC_AGG_ENABLE_8723A BIT(5)
-#define TXDESC_AGG_BREAK_8723A BIT(6)
-#define TXDESC_MACID_SHIFT_8723B 0
-#define TXDESC_MACID_MASK_8723B 0x00f0
+#define TXDESC32_AGG_ENABLE BIT(5)
+#define TXDESC32_AGG_BREAK BIT(6)
+#define TXDESC40_MACID_SHIFT 0
+#define TXDESC40_MACID_MASK 0x00f0
#define TXDESC_QUEUE_SHIFT 8
#define TXDESC_QUEUE_MASK 0x1f00
#define TXDESC_QUEUE_BK 0x2
@@ -437,9 +467,9 @@ struct rtl8723bu_tx_desc {
#define TXDESC_QUEUE_MGNT 0x12
#define TXDESC_QUEUE_CMD 0x13
#define TXDESC_QUEUE_MAX (TXDESC_QUEUE_CMD + 1)
-#define TXDESC_RDG_NAV_EXT_8723B BIT(13)
-#define TXDESC_LSIG_TXOP_ENABLE_8723B BIT(14)
-#define TXDESC_PIFS_8723B BIT(15)
+#define TXDESC40_RDG_NAV_EXT BIT(13)
+#define TXDESC40_LSIG_TXOP_ENABLE BIT(14)
+#define TXDESC40_PIFS BIT(15)
#define DESC_RATE_ID_SHIFT 16
#define DESC_RATE_ID_MASK 0xf
@@ -451,71 +481,71 @@ struct rtl8723bu_tx_desc {
#define TXDESC_HWPC BIT(31)
/* Word 2 */
-#define TXDESC_PAID_SHIFT_8723B 0
-#define TXDESC_PAID_MASK_8723B 0x1ff
-#define TXDESC_CCA_RTS_SHIFT_8723B 10
-#define TXDESC_CCA_RTS_MASK_8723B 0xc00
-#define TXDESC_AGG_ENABLE_8723B BIT(12)
-#define TXDESC_RDG_ENABLE_8723B BIT(13)
-#define TXDESC_AGG_BREAK_8723B BIT(16)
-#define TXDESC_MORE_FRAG_8723B BIT(17)
-#define TXDESC_RAW_8723B BIT(18)
-#define TXDESC_ACK_REPORT_8723A BIT(19)
-#define TXDESC_SPE_RPT_8723B BIT(19)
+#define TXDESC40_PAID_SHIFT 0
+#define TXDESC40_PAID_MASK 0x1ff
+#define TXDESC40_CCA_RTS_SHIFT 10
+#define TXDESC40_CCA_RTS_MASK 0xc00
+#define TXDESC40_AGG_ENABLE BIT(12)
+#define TXDESC40_RDG_ENABLE BIT(13)
+#define TXDESC40_AGG_BREAK BIT(16)
+#define TXDESC40_MORE_FRAG BIT(17)
+#define TXDESC40_RAW BIT(18)
+#define TXDESC32_ACK_REPORT BIT(19)
+#define TXDESC40_SPE_RPT BIT(19)
#define TXDESC_AMPDU_DENSITY_SHIFT 20
-#define TXDESC_BT_INT_8723B BIT(23)
-#define TXDESC_GID_8723B BIT(24)
+#define TXDESC40_BT_INT BIT(23)
+#define TXDESC40_GID_SHIFT 24
/* Word 3 */
-#define TXDESC_USE_DRIVER_RATE_8723B BIT(8)
-#define TXDESC_CTS_SELF_ENABLE_8723B BIT(11)
-#define TXDESC_RTS_CTS_ENABLE_8723B BIT(12)
-#define TXDESC_HW_RTS_ENABLE_8723B BIT(13)
-#define TXDESC_SEQ_SHIFT_8723A 16
-#define TXDESC_SEQ_MASK_8723A 0x0fff0000
+#define TXDESC40_USE_DRIVER_RATE BIT(8)
+#define TXDESC40_CTS_SELF_ENABLE BIT(11)
+#define TXDESC40_RTS_CTS_ENABLE BIT(12)
+#define TXDESC40_HW_RTS_ENABLE BIT(13)
+#define TXDESC32_SEQ_SHIFT 16
+#define TXDESC32_SEQ_MASK 0x0fff0000
/* Word 4 */
-#define TXDESC_RTS_RATE_SHIFT_8723A 0
-#define TXDESC_RTS_RATE_MASK_8723A 0x3f
-#define TXDESC_QOS_8723A BIT(6)
-#define TXDESC_HW_SEQ_ENABLE_8723A BIT(7)
-#define TXDESC_USE_DRIVER_RATE_8723A BIT(8)
+#define TXDESC32_RTS_RATE_SHIFT 0
+#define TXDESC32_RTS_RATE_MASK 0x3f
+#define TXDESC32_QOS BIT(6)
+#define TXDESC32_HW_SEQ_ENABLE BIT(7)
+#define TXDESC32_USE_DRIVER_RATE BIT(8)
#define TXDESC_DISABLE_DATA_FB BIT(10)
-#define TXDESC_CTS_SELF_ENABLE_8723A BIT(11)
-#define TXDESC_RTS_CTS_ENABLE_8723A BIT(12)
-#define TXDESC_HW_RTS_ENABLE_8723A BIT(13)
+#define TXDESC32_CTS_SELF_ENABLE BIT(11)
+#define TXDESC32_RTS_CTS_ENABLE BIT(12)
+#define TXDESC32_HW_RTS_ENABLE BIT(13)
#define TXDESC_PRIME_CH_OFF_LOWER BIT(20)
#define TXDESC_PRIME_CH_OFF_UPPER BIT(21)
-#define TXDESC_SHORT_PREAMBLE_8723A BIT(24)
+#define TXDESC32_SHORT_PREAMBLE BIT(24)
#define TXDESC_DATA_BW BIT(25)
#define TXDESC_RTS_DATA_BW BIT(27)
#define TXDESC_RTS_PRIME_CH_OFF_LOWER BIT(28)
#define TXDESC_RTS_PRIME_CH_OFF_UPPER BIT(29)
-#define TXDESC_DATA_RATE_FB_SHIFT_8723B 8
-#define TXDESC_DATA_RATE_FB_MASK_8723B 0x00001f00
-#define TXDESC_RETRY_LIMIT_ENABLE_8723B BIT(17)
-#define TXDESC_RETRY_LIMIT_SHIFT_8723B 18
-#define TXDESC_RETRY_LIMIT_MASK_8723B 0x00fc0000
-#define TXDESC_RTS_RATE_SHIFT_8723B 24
-#define TXDESC_RTS_RATE_MASK_8723B 0x3f000000
+#define TXDESC40_DATA_RATE_FB_SHIFT 8
+#define TXDESC40_DATA_RATE_FB_MASK 0x00001f00
+#define TXDESC40_RETRY_LIMIT_ENABLE BIT(17)
+#define TXDESC40_RETRY_LIMIT_SHIFT 18
+#define TXDESC40_RETRY_LIMIT_MASK 0x00fc0000
+#define TXDESC40_RTS_RATE_SHIFT 24
+#define TXDESC40_RTS_RATE_MASK 0x3f000000
/* Word 5 */
-#define TXDESC_SHORT_PREAMBLE_8723B BIT(4)
-#define TXDESC_SHORT_GI BIT(6)
+#define TXDESC40_SHORT_PREAMBLE BIT(4)
+#define TXDESC32_SHORT_GI BIT(6)
#define TXDESC_CCX_TAG BIT(7)
-#define TXDESC_RETRY_LIMIT_ENABLE_8723A BIT(17)
-#define TXDESC_RETRY_LIMIT_SHIFT_8723A 18
-#define TXDESC_RETRY_LIMIT_MASK_8723A 0x00fc0000
+#define TXDESC32_RETRY_LIMIT_ENABLE BIT(17)
+#define TXDESC32_RETRY_LIMIT_SHIFT 18
+#define TXDESC32_RETRY_LIMIT_MASK 0x00fc0000
/* Word 6 */
#define TXDESC_MAX_AGG_SHIFT 11
/* Word 8 */
-#define TXDESC_HW_SEQ_ENABLE_8723B BIT(15)
+#define TXDESC40_HW_SEQ_ENABLE BIT(15)
/* Word 9 */
-#define TXDESC_SEQ_SHIFT_8723B 12
-#define TXDESC_SEQ_MASK_8723B 0x00fff000
+#define TXDESC40_SEQ_SHIFT 12
+#define TXDESC40_SEQ_MASK 0x00fff000
struct phy_rx_agc_info {
#ifdef __LITTLE_ENDIAN
@@ -600,6 +630,31 @@ struct rtl8xxxu_firmware_header {
};
/*
+ * 8723au/8192cu/8188ru required base power index offset tables.
+ */
+struct rtl8xxxu_power_base {
+ u32 reg_0e00;
+ u32 reg_0e04;
+ u32 reg_0e08;
+ u32 reg_086c;
+
+ u32 reg_0e10;
+ u32 reg_0e14;
+ u32 reg_0e18;
+ u32 reg_0e1c;
+
+ u32 reg_0830;
+ u32 reg_0834;
+ u32 reg_0838;
+ u32 reg_086c_2;
+
+ u32 reg_083c;
+ u32 reg_0848;
+ u32 reg_084c;
+ u32 reg_0868;
+};
+
+/*
* The 8723au has 3 channel groups: 1-3, 4-9, and 10-14
*/
struct rtl8723au_idx {
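
For the grouping described in the comment above, a trivial mapping helper would look like the following (illustrative sketch; the name and exact placement are assumptions):

    /* gen1 channel grouping: 1-3, 4-9, 10-14 */
    static int rtl8723a_channel_to_group(int channel)
    {
            if (channel < 4)
                    return 0;       /* channels 1-3 */
            if (channel < 10)
                    return 1;       /* channels 4-9 */
            return 2;               /* channels 10-14 */
    }
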
@@ -763,55 +818,49 @@ struct rtl8192eu_efuse_tx_power {
u8 cck_base[6];
u8 ht40_base[5];
struct rtl8723au_idx ht20_ofdm_1s_diff;
- struct rtl8723au_idx ht40_ht20_2s_diff;
- struct rtl8723au_idx ofdm_cck_2s_diff; /* not used */
- struct rtl8723au_idx ht40_ht20_3s_diff;
- struct rtl8723au_idx ofdm_cck_3s_diff; /* not used */
- struct rtl8723au_idx ht40_ht20_4s_diff;
- struct rtl8723au_idx ofdm_cck_4s_diff; /* not used */
+ struct rtl8723bu_pwr_idx pwr_diff[3];
+ u8 dummy5g[24]; /* max channel group (14) + power diff offset (10) */
};
struct rtl8192eu_efuse {
__le16 rtl_id;
u8 res0[0x0e];
struct rtl8192eu_efuse_tx_power tx_power_index_A; /* 0x10 */
- struct rtl8192eu_efuse_tx_power tx_power_index_B; /* 0x22 */
- struct rtl8192eu_efuse_tx_power tx_power_index_C; /* 0x34 */
- struct rtl8192eu_efuse_tx_power tx_power_index_D; /* 0x46 */
- u8 res1[0x60];
+ struct rtl8192eu_efuse_tx_power tx_power_index_B; /* 0x3a */
+ u8 res2[0x54];
u8 channel_plan; /* 0xb8 */
u8 xtal_k;
u8 thermal_meter;
u8 iqk_lck;
u8 pa_type; /* 0xbc */
u8 lna_type_2g; /* 0xbd */
- u8 res2[1];
+ u8 res3[1];
u8 lna_type_5g; /* 0xbf */
- u8 res13[1];
+ u8 res4[1];
u8 rf_board_option;
u8 rf_feature_option;
u8 rf_bt_setting;
u8 eeprom_version;
u8 eeprom_customer_id;
- u8 res3[3];
+ u8 res5[3];
u8 rf_antenna_option; /* 0xc9 */
- u8 res4[6];
+ u8 res6[6];
u8 vid; /* 0xd0 */
- u8 res5[1];
+ u8 res7[1];
u8 pid; /* 0xd2 */
- u8 res6[1];
+ u8 res8[1];
u8 usb_optional_function;
- u8 res7[2];
+ u8 res9[2];
u8 mac_addr[ETH_ALEN]; /* 0xd7 */
- u8 res8[2];
+ u8 res10[2];
u8 vendor_name[7];
- u8 res9[2];
+ u8 res11[2];
u8 device_name[0x0b]; /* 0xe8 */
- u8 res10[2];
+ u8 res12[2];
u8 serial[0x0b]; /* 0xf5 */
- u8 res11[0x30];
+ u8 res13[0x30];
u8 unknown[0x0d]; /* 0x130 */
- u8 res12[0xc3];
+ u8 res14[0xc3];
};
struct rtl8xxxu_reg8val {
@@ -1177,6 +1226,7 @@ struct rtl8xxxu_priv {
struct rtl8723au_idx ofdm_tx_power_diff[RTL8723B_TX_COUNT];
struct rtl8723au_idx ht20_tx_power_diff[RTL8723B_TX_COUNT];
struct rtl8723au_idx ht40_tx_power_diff[RTL8723B_TX_COUNT];
+ struct rtl8xxxu_power_base *power_base;
u32 chip_cut:4;
u32 rom_rev:4;
u32 is_multi_func:1;
@@ -1204,7 +1254,6 @@ struct rtl8xxxu_priv {
u8 rf_paths;
u8 rx_paths;
u8 tx_paths;
- u32 rf_mode_ag[2];
u32 rege94;
u32 rege9c;
u32 regeb4;
@@ -1236,8 +1285,9 @@ struct rtl8xxxu_priv {
u32 mac_backup[RTL8XXXU_MAC_REGS];
u32 bb_backup[RTL8XXXU_BB_REGS];
u32 bb_recovery_backup[RTL8XXXU_BB_REGS];
- u32 rtlchip;
+ enum rtl8xxxu_rtl_chip rtl_chip;
u8 pi_enabled:1;
+ u8 no_pape:1;
u8 int_buf[USB_INTR_CONTENT_LENGTH];
};
@@ -1260,6 +1310,8 @@ struct rtl8xxxu_fileops {
void (*power_off) (struct rtl8xxxu_priv *priv);
void (*reset_8051) (struct rtl8xxxu_priv *priv);
int (*llt_init) (struct rtl8xxxu_priv *priv, u8 last_tx_page);
+ void (*init_phy_bb) (struct rtl8xxxu_priv *priv);
+ int (*init_phy_rf) (struct rtl8xxxu_priv *priv);
void (*phy_init_antenna_selection) (struct rtl8xxxu_priv *priv);
void (*phy_iq_calibrate) (struct rtl8xxxu_priv *priv);
void (*config_channel) (struct ieee80211_hw *hw);
@@ -1269,6 +1321,7 @@ struct rtl8xxxu_fileops {
void (*init_statistics) (struct rtl8xxxu_priv *priv);
void (*enable_rf) (struct rtl8xxxu_priv *priv);
void (*disable_rf) (struct rtl8xxxu_priv *priv);
+ void (*usb_quirks) (struct rtl8xxxu_priv *priv);
void (*set_tx_power) (struct rtl8xxxu_priv *priv, int channel,
bool ht40);
void (*update_rate_mask) (struct rtl8xxxu_priv *priv,
@@ -1276,12 +1329,98 @@ struct rtl8xxxu_fileops {
void (*report_connect) (struct rtl8xxxu_priv *priv,
u8 macid, bool connect);
int writeN_block_size;
- u16 mbox_ext_reg;
- char mbox_ext_width;
char tx_desc_size;
+ char rx_desc_size;
char has_s0s1;
u32 adda_1t_init;
u32 adda_1t_path_on;
u32 adda_2t_path_on_a;
u32 adda_2t_path_on_b;
+ u16 trxff_boundary;
+ u8 pbp_rx;
+ u8 pbp_tx;
+ struct rtl8xxxu_reg8val *mactable;
+ u8 total_page_num;
+ u8 page_num_hi;
+ u8 page_num_lo;
+ u8 page_num_norm;
};
+
+extern int rtl8xxxu_debug;
+
+extern struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[];
+extern const u32 rtl8xxxu_iqk_phy_iq_bb_reg[];
+u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr);
+u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr);
+u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr);
+int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val);
+int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val);
+int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val);
+u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
+ enum rtl8xxxu_rfpath path, u8 reg);
+int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
+ enum rtl8xxxu_rfpath path, u8 reg, u32 data);
+void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+ u32 *backup, int count);
+void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+ u32 *backup, int count);
+void rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv,
+ const u32 *reg, u32 *backup);
+void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
+ const u32 *reg, u32 *backup);
+void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
+ bool path_a_on);
+void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv,
+ const u32 *regs, u32 *backup);
+void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv, bool iqk_ok,
+ int result[][8], int candidate, bool tx_only);
+void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv, bool iqk_ok,
+ int result[][8], int candidate, bool tx_only);
+int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_rfregval *table,
+ enum rtl8xxxu_rfpath path);
+int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_reg32val *array);
+int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name);
+void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv);
+int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page);
+void rtl8xxxu_gen2_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start);
+int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv);
+int rtl8xxxu_gen2_h2c_cmd(struct rtl8xxxu_priv *priv,
+ struct h2c_cmd *h2c, int len);
+int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv);
+int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page);
+void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv,
+ int channel, bool ht40);
+void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw);
+void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw);
+void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, int sgi);
+void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, int sgi);
+void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect);
+void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect);
+void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv);
+void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv);
+int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status);
+int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status);
+int rtl8xxxu_gen2_channel_to_group(int channel);
+bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
+ int result[][8], int c1, int c2);
+
+extern struct rtl8xxxu_fileops rtl8192cu_fops;
+extern struct rtl8xxxu_fileops rtl8192eu_fops;
+extern struct rtl8xxxu_fileops rtl8723au_fops;
+extern struct rtl8xxxu_fileops rtl8723bu_fops;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
new file mode 100644
index 000000000..08066dec1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
@@ -0,0 +1,586 @@
+/*
+ * RTL8XXXU mac80211 USB driver - 8188c/8188r/8192c specific subdriver
+ *
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+#ifdef CONFIG_RTL8XXXU_UNTESTED
+static struct rtl8xxxu_power_base rtl8192c_power_base = {
+ .reg_0e00 = 0x07090c0c,
+ .reg_0e04 = 0x01020405,
+ .reg_0e08 = 0x00000000,
+ .reg_086c = 0x00000000,
+
+ .reg_0e10 = 0x0b0c0c0e,
+ .reg_0e14 = 0x01030506,
+ .reg_0e18 = 0x0b0c0d0e,
+ .reg_0e1c = 0x01030509,
+
+ .reg_0830 = 0x07090c0c,
+ .reg_0834 = 0x01020405,
+ .reg_0838 = 0x00000000,
+ .reg_086c_2 = 0x00000000,
+
+ .reg_083c = 0x0b0c0d0e,
+ .reg_0848 = 0x01030509,
+ .reg_084c = 0x0b0c0d0e,
+ .reg_0868 = 0x01030509,
+};
+
+static struct rtl8xxxu_power_base rtl8188r_power_base = {
+ .reg_0e00 = 0x06080808,
+ .reg_0e04 = 0x00040406,
+ .reg_0e08 = 0x00000000,
+ .reg_086c = 0x00000000,
+
+ .reg_0e10 = 0x04060608,
+ .reg_0e14 = 0x00020204,
+ .reg_0e18 = 0x04060608,
+ .reg_0e1c = 0x00020204,
+
+ .reg_0830 = 0x06080808,
+ .reg_0834 = 0x00040406,
+ .reg_0838 = 0x00000000,
+ .reg_086c_2 = 0x00000000,
+
+ .reg_083c = 0x04060608,
+ .reg_0848 = 0x00020204,
+ .reg_084c = 0x04060608,
+ .reg_0868 = 0x00020204,
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00010255},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+ {0x1f, 0x00080001}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x00000000},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287b3}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x0001429b},
+ {0x13, 0x00010299}, {0x13, 0x0000c29c},
+ {0x13, 0x000081a0}, {0x13, 0x000040ac},
+ {0x13, 0x00000020}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f424},
+ {0x15, 0x0004f424}, {0x15, 0x0008f424},
+ {0x15, 0x000cf424}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00080003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00044457}, {0x1f, 0x00080000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287af}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x00014297},
+ {0x13, 0x00010295}, {0x13, 0x0000c298},
+ {0x13, 0x0000819c}, {0x13, 0x000040a8},
+ {0x13, 0x0000001c}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f424},
+ {0x15, 0x0004f424}, {0x15, 0x0008f424},
+ {0x15, 0x000cf424}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00010255},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+ {0x1f, 0x00080001}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x00000000},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287b3}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x0001429b},
+ {0x13, 0x00010299}, {0x13, 0x0000c29c},
+ {0x13, 0x000081a0}, {0x13, 0x000040ac},
+ {0x13, 0x00000020}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f405},
+ {0x15, 0x0004f405}, {0x15, 0x0008f405},
+ {0x15, 0x000cf405}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00080003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00044457}, {0x1f, 0x00080000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00018c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001adb0}, {0x0b, 0x00054867},
+ {0x0c, 0x0008992e}, {0x0d, 0x0000e529},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00000255},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
+ {0x1f, 0x00080001}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x0000083c},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x000977c0},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x000d8000}, {0x12, 0x00090000},
+ {0x12, 0x00051000}, {0x12, 0x00012000},
+ {0x13, 0x00028fb4}, {0x13, 0x00024fa8},
+ {0x13, 0x000207a4}, {0x13, 0x0001c3b0},
+ {0x13, 0x000183a4}, {0x13, 0x00014398},
+ {0x13, 0x000101a4}, {0x13, 0x0000c198},
+ {0x13, 0x000080a4}, {0x13, 0x00004098},
+ {0x13, 0x00000000}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f405},
+ {0x15, 0x0004f405}, {0x15, 0x0008f405},
+ {0x15, 0x000cf405}, {0x16, 0x000e0330},
+ {0x16, 0x000a0330}, {0x16, 0x00060330},
+ {0x16, 0x00020330}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00080003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00044457}, {0x1f, 0x00080000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ if (!priv->vendor_umc)
+ fw_name = "/*(DEBLOBBED)*/";
+ else if (priv->chip_cut || priv->rtl_chip == RTL8192C)
+ fw_name = "/*(DEBLOBBED)*/";
+ else
+ fw_name = "/*(DEBLOBBED)*/";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+ return ret;
+}
+
+static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8192cu_efuse *efuse = &priv->efuse_wifi.efuse8192;
+ int i;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A,
+ efuse->cck_tx_power_index_A,
+ sizeof(efuse->cck_tx_power_index_A));
+ memcpy(priv->cck_tx_power_index_B,
+ efuse->cck_tx_power_index_B,
+ sizeof(efuse->cck_tx_power_index_B));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->ht40_1s_tx_power_index_A,
+ sizeof(efuse->ht40_1s_tx_power_index_A));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ efuse->ht40_1s_tx_power_index_B,
+ sizeof(efuse->ht40_1s_tx_power_index_B));
+ memcpy(priv->ht40_2s_tx_power_index_diff,
+ efuse->ht40_2s_tx_power_index_diff,
+ sizeof(efuse->ht40_2s_tx_power_index_diff));
+
+ memcpy(priv->ht20_tx_power_index_diff,
+ efuse->ht20_tx_power_index_diff,
+ sizeof(efuse->ht20_tx_power_index_diff));
+ memcpy(priv->ofdm_tx_power_index_diff,
+ efuse->ofdm_tx_power_index_diff,
+ sizeof(efuse->ofdm_tx_power_index_diff));
+
+ memcpy(priv->ht40_max_power_offset,
+ efuse->ht40_max_power_offset,
+ sizeof(efuse->ht40_max_power_offset));
+ memcpy(priv->ht20_max_power_offset,
+ efuse->ht20_max_power_offset,
+ sizeof(efuse->ht20_max_power_offset));
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+ efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.20s\n",
+ efuse->device_name);
+
+ priv->power_base = &rtl8192c_power_base;
+
+ if (efuse->rf_regulatory & 0x20) {
+ sprintf(priv->chip_name, "8188RU");
+ priv->rtl_chip = RTL8188R;
+ priv->hi_pa = 1;
+ priv->no_pape = 1;
+ priv->power_base = &rtl8188r_power_base;
+ }
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ unsigned char *raw = priv->efuse_wifi.raw;
+
+ dev_info(&priv->udev->dev,
+ "%s: dumping efuse (0x%02zx bytes):\n",
+ __func__, sizeof(struct rtl8192cu_efuse));
+ for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) {
+ dev_info(&priv->udev->dev, "%02x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ raw[i], raw[i + 1], raw[i + 2],
+ raw[i + 3], raw[i + 4], raw[i + 5],
+ raw[i + 6], raw[i + 7]);
+ }
+ }
+ return 0;
+}
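
Two small details worth noting in the function above: the vendor and product strings come straight out of the efuse and are not guaranteed to be NUL-terminated, so the precision-bounded %.7s and %.20s formats keep dev_info() from reading past the fields; and the raw efuse dump prints eight bytes per line, gated behind the RTL8XXXU_DEBUG_EFUSE flag in the module's debug mask.
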
+
+static int rtl8192cu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8xxxu_rfregval *rftable;
+ int ret;
+
+ if (priv->rtl_chip == RTL8188R) {
+ rftable = rtl8188ru_radioa_1t_highpa_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ } else if (priv->rf_paths == 1) {
+ rftable = rtl8192cu_radioa_1t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ } else {
+ rftable = rtl8192cu_radioa_2t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
+ if (ret)
+ goto exit;
+ rftable = rtl8192cu_radiob_2t_init_table;
+ ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
+ }
+
+exit:
+ return ret;
+}
+
+static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int i;
+
+ for (i = 100; i; i--) {
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO);
+ if (val8 & APS_FSMCO_PFM_ALDN)
+ break;
+ }
+
+ if (!i) {
+ pr_info("%s: Poll failed\n", __func__);
+ return -ENODEV;
+ }
+
+ /*
+ * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+ */
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+ rtl8xxxu_write8(priv, REG_SPS0_CTRL, 0x2b);
+ udelay(100);
+
+ val8 = rtl8xxxu_read8(priv, REG_LDOV12D_CTRL);
+ if (!(val8 & LDOV12D_ENABLE)) {
+ pr_info("%s: Enabling LDOV12D (%02x)\n", __func__, val8);
+ val8 |= LDOV12D_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOV12D_CTRL, val8);
+
+ udelay(100);
+
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 &= ~SYS_ISO_MD2PP;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+ }
+
+ /*
+ * Auto enable WLAN
+ */
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ val16 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ for (i = 1000; i; i--) {
+ val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
+ if (!(val16 & APS_FSMCO_MAC_ENABLE))
+ break;
+ }
+ if (!i) {
+ pr_info("%s: FSMCO_MAC_ENABLE poll failed\n", __func__);
+ return -EBUSY;
+ }
+
+ /*
+ * Enable radio, GPIO, LED
+ */
+ val16 = APS_FSMCO_HW_SUSPEND | APS_FSMCO_ENABLE_POWERDOWN |
+ APS_FSMCO_PFM_ALDN;
+ rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
+
+ /*
+ * Release RF digital isolation
+ */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
+ val16 &= ~SYS_ISO_DIOR;
+ rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
+
+ val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+ val8 &= ~APSD_CTRL_OFF;
+ rtl8xxxu_write8(priv, REG_APSD_CTRL, val8);
+ for (i = 200; i; i--) {
+ val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
+ if (!(val8 & APSD_CTRL_OFF_STATUS))
+ break;
+ }
+
+ if (!i) {
+ pr_info("%s: APSD_CTRL poll failed\n", __func__);
+ return -EBUSY;
+ }
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE |
+ CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE;
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ rtl8xxxu_write8(priv, 0xfe10, 0x19);
+
+ /*
+ * Workaround for 8188RU LNA power leakage problem.
+ */
+ if (priv->rtl_chip == RTL8188R) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
+ val32 &= ~BIT(1);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+ }
+ return 0;
+}
+
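
rtl8192cu_power_on() leans on the same busy-poll idiom throughout: count a budget down in the loop variable and treat i == 0 after the loop as a timeout. As a generic sketch (kernel context assumed; the bound, register, and function name are illustrative):

    static int poll_bit_set(struct rtl8xxxu_priv *priv, u16 reg, u8 bit)
    {
            int i;

            for (i = 100; i; i--) {
                    if (rtl8xxxu_read8(priv, reg) & bit)
                            return 0;       /* bit came up within budget */
            }
            return -ETIMEDOUT;              /* i reached zero: give up */
    }
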
+struct rtl8xxxu_fileops rtl8192cu_fops = {
+ .parse_efuse = rtl8192cu_parse_efuse,
+ .load_firmware = rtl8192cu_load_firmware,
+ .power_on = rtl8192cu_power_on,
+ .power_off = rtl8xxxu_power_off,
+ .reset_8051 = rtl8xxxu_reset_8051,
+ .llt_init = rtl8xxxu_init_llt_table,
+ .init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
+ .init_phy_rf = rtl8192cu_init_phy_rf,
+ .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
+ .config_channel = rtl8xxxu_gen1_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+ .enable_rf = rtl8xxxu_gen1_enable_rf,
+ .disable_rf = rtl8xxxu_gen1_disable_rf,
+ .usb_quirks = rtl8xxxu_gen1_usb_quirks,
+ .set_tx_power = rtl8xxxu_gen1_set_tx_power,
+ .update_rate_mask = rtl8xxxu_update_rate_mask,
+ .report_connect = rtl8xxxu_gen1_report_connect,
+ .writeN_block_size = 128,
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .adda_1t_init = 0x0b1b25a0,
+ .adda_1t_path_on = 0x0bdb25a0,
+ .adda_2t_path_on_a = 0x04db25a4,
+ .adda_2t_path_on_b = 0x0b1b25a4,
+ .trxff_boundary = 0x27ff,
+ .pbp_rx = PBP_PAGE_SIZE_128,
+ .pbp_tx = PBP_PAGE_SIZE_128,
+ .mactable = rtl8xxxu_gen1_mac_init_table,
+};
+#endif
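
Note that everything in this new file after the includes is wrapped in #ifdef CONFIG_RTL8XXXU_UNTESTED, so the 8188c/8188r/8192c tables and callbacks above only reach the binary when that opt-in option is enabled.
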
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
new file mode 100644
index 000000000..9461ecd31
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
@@ -0,0 +1,1525 @@
+/*
+ * RTL8XXXU mac80211 USB driver - 8192e specific subdriver
+ *
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+static struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = {
+ {0x011, 0xeb}, {0x012, 0x07}, {0x014, 0x75}, {0x303, 0xa7},
+ {0x428, 0x0a}, {0x429, 0x10}, {0x430, 0x00}, {0x431, 0x00},
+ {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+ {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00},
+ {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f},
+ {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00},
+ {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f},
+ {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66},
+ {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff}, {0x4cd, 0xff},
+ {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2}, {0x502, 0x2f},
+ {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3}, {0x506, 0x5e},
+ {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4}, {0x50a, 0x5e},
+ {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4}, {0x50e, 0x00},
+ {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a}, {0x516, 0x0a},
+ {0x525, 0x4f}, {0x540, 0x12}, {0x541, 0x64}, {0x550, 0x10},
+ {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50}, {0x55d, 0xff},
+ {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a}, {0x620, 0xff},
+ {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff}, {0x624, 0xff},
+ {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff}, {0x638, 0x50},
+ {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e}, {0x63f, 0x0e},
+ {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00}, {0x652, 0xc8},
+ {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43}, {0x702, 0x65},
+ {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43}, {0x70a, 0x65},
+ {0x70b, 0x87},
+ {0xffff, 0xff},
+};
+
+static struct rtl8xxxu_reg32val rtl8192eu_phy_init_table[] = {
+ {0x800, 0x80040000}, {0x804, 0x00000003},
+ {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+ {0x810, 0x10001331}, {0x814, 0x020c3d10},
+ {0x818, 0x02220385}, {0x81c, 0x00000000},
+ {0x820, 0x01000100}, {0x824, 0x00390204},
+ {0x828, 0x01000100}, {0x82c, 0x00390204},
+ {0x830, 0x32323232}, {0x834, 0x30303030},
+ {0x838, 0x30303030}, {0x83c, 0x30303030},
+ {0x840, 0x00010000}, {0x844, 0x00010000},
+ {0x848, 0x28282828}, {0x84c, 0x28282828},
+ {0x850, 0x00000000}, {0x854, 0x00000000},
+ {0x858, 0x009a009a}, {0x85c, 0x01000014},
+ {0x860, 0x66f60000}, {0x864, 0x061f0000},
+ {0x868, 0x30303030}, {0x86c, 0x30303030},
+ {0x870, 0x00000000}, {0x874, 0x55004200},
+ {0x878, 0x08080808}, {0x87c, 0x00000000},
+ {0x880, 0xb0000c1c}, {0x884, 0x00000001},
+ {0x888, 0x00000000}, {0x88c, 0xcc0000c0},
+ {0x890, 0x00000800}, {0x894, 0xfffffffe},
+ {0x898, 0x40302010}, {0x900, 0x00000000},
+ {0x904, 0x00000023}, {0x908, 0x00000000},
+ {0x90c, 0x81121313}, {0x910, 0x806c0001},
+ {0x914, 0x00000001}, {0x918, 0x00000000},
+ {0x91c, 0x00010000}, {0x924, 0x00000001},
+ {0x928, 0x00000000}, {0x92c, 0x00000000},
+ {0x930, 0x00000000}, {0x934, 0x00000000},
+ {0x938, 0x00000000}, {0x93c, 0x00000000},
+ {0x940, 0x00000000}, {0x944, 0x00000000},
+ {0x94c, 0x00000008}, {0xa00, 0x00d0c7c8},
+ {0xa04, 0x81ff000c}, {0xa08, 0x8c838300},
+ {0xa0c, 0x2e68120f}, {0xa10, 0x95009b78},
+ {0xa14, 0x1114d028}, {0xa18, 0x00881117},
+ {0xa1c, 0x89140f00}, {0xa20, 0x1a1b0000},
+ {0xa24, 0x090e1317}, {0xa28, 0x00000204},
+ {0xa2c, 0x00d30000}, {0xa70, 0x101fff00},
+ {0xa74, 0x00000007}, {0xa78, 0x00000900},
+ {0xa7c, 0x225b0606}, {0xa80, 0x218075b1},
+ {0xb38, 0x00000000}, {0xc00, 0x48071d40},
+ {0xc04, 0x03a05633}, {0xc08, 0x000000e4},
+ {0xc0c, 0x6c6c6c6c}, {0xc10, 0x08800000},
+ {0xc14, 0x40000100}, {0xc18, 0x08800000},
+ {0xc1c, 0x40000100}, {0xc20, 0x00000000},
+ {0xc24, 0x00000000}, {0xc28, 0x00000000},
+ {0xc2c, 0x00000000}, {0xc30, 0x69e9ac47},
+ {0xc34, 0x469652af}, {0xc38, 0x49795994},
+ {0xc3c, 0x0a97971c}, {0xc40, 0x1f7c403f},
+ {0xc44, 0x000100b7}, {0xc48, 0xec020107},
+ {0xc4c, 0x007f037f},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0xc50, 0x00340220},
+#else
+ {0xc50, 0x00340020},
+#endif
+ {0xc54, 0x0080801f},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0xc58, 0x00000220},
+#else
+ {0xc58, 0x00000020},
+#endif
+ {0xc5c, 0x00248492}, {0xc60, 0x00000000},
+ {0xc64, 0x7112848b}, {0xc68, 0x47c00bff},
+ {0xc6c, 0x00000036}, {0xc70, 0x00000600},
+ {0xc74, 0x02013169}, {0xc78, 0x0000001f},
+ {0xc7c, 0x00b91612},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0xc80, 0x2d4000b5},
+#else
+ {0xc80, 0x40000100},
+#endif
+ {0xc84, 0x21f60000},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0xc88, 0x2d4000b5},
+#else
+ {0xc88, 0x40000100},
+#endif
+ {0xc8c, 0xa0e40000}, {0xc90, 0x00121820},
+ {0xc94, 0x00000000}, {0xc98, 0x00121820},
+ {0xc9c, 0x00007f7f}, {0xca0, 0x00000000},
+ {0xca4, 0x000300a0}, {0xca8, 0x00000000},
+ {0xcac, 0x00000000}, {0xcb0, 0x00000000},
+ {0xcb4, 0x00000000}, {0xcb8, 0x00000000},
+ {0xcbc, 0x28000000}, {0xcc0, 0x00000000},
+ {0xcc4, 0x00000000}, {0xcc8, 0x00000000},
+ {0xccc, 0x00000000}, {0xcd0, 0x00000000},
+ {0xcd4, 0x00000000}, {0xcd8, 0x64b22427},
+ {0xcdc, 0x00766932}, {0xce0, 0x00222222},
+ {0xce4, 0x00040000}, {0xce8, 0x77644302},
+ {0xcec, 0x2f97d40c}, {0xd00, 0x00080740},
+ {0xd04, 0x00020403}, {0xd08, 0x0000907f},
+ {0xd0c, 0x20010201}, {0xd10, 0xa0633333},
+ {0xd14, 0x3333bc43}, {0xd18, 0x7a8f5b6b},
+ {0xd1c, 0x0000007f}, {0xd2c, 0xcc979975},
+ {0xd30, 0x00000000}, {0xd34, 0x80608000},
+ {0xd38, 0x00000000}, {0xd3c, 0x00127353},
+ {0xd40, 0x00000000}, {0xd44, 0x00000000},
+ {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+ {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+ {0xd58, 0x00000282}, {0xd5c, 0x30032064},
+ {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+ {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+ {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+ {0xd78, 0x000e3c24}, {0xd80, 0x01081008},
+ {0xd84, 0x00000800}, {0xd88, 0xf0b50000},
+ {0xe00, 0x30303030}, {0xe04, 0x30303030},
+ {0xe08, 0x03903030}, {0xe10, 0x30303030},
+ {0xe14, 0x30303030}, {0xe18, 0x30303030},
+ {0xe1c, 0x30303030}, {0xe28, 0x00000000},
+ {0xe30, 0x1000dc1f}, {0xe34, 0x10008c1f},
+ {0xe38, 0x02140102}, {0xe3c, 0x681604c2},
+ {0xe40, 0x01007c00}, {0xe44, 0x01004800},
+ {0xe48, 0xfb000000}, {0xe4c, 0x000028d1},
+ {0xe50, 0x1000dc1f}, {0xe54, 0x10008c1f},
+ {0xe58, 0x02140102}, {0xe5c, 0x28160d05},
+ {0xe60, 0x00000008}, {0xe68, 0x0fc05656},
+ {0xe6c, 0x03c09696}, {0xe70, 0x03c09696},
+ {0xe74, 0x0c005656}, {0xe78, 0x0c005656},
+ {0xe7c, 0x0c005656}, {0xe80, 0x0c005656},
+ {0xe84, 0x03c09696}, {0xe88, 0x0c005656},
+ {0xe8c, 0x03c09696}, {0xed0, 0x03c09696},
+ {0xed4, 0x03c09696}, {0xed8, 0x03c09696},
+ {0xedc, 0x0000d6d6}, {0xee0, 0x0000d6d6},
+ {0xeec, 0x0fc01616}, {0xee4, 0xb0000c1c},
+ {0xee8, 0x00000001}, {0xf14, 0x00000003},
+ {0xf4c, 0x00000000}, {0xf00, 0x00000300},
+ {0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_std_table[] = {
+ {0xc78, 0xfb000001}, {0xc78, 0xfb010001},
+ {0xc78, 0xfb020001}, {0xc78, 0xfb030001},
+ {0xc78, 0xfb040001}, {0xc78, 0xfb050001},
+ {0xc78, 0xfa060001}, {0xc78, 0xf9070001},
+ {0xc78, 0xf8080001}, {0xc78, 0xf7090001},
+ {0xc78, 0xf60a0001}, {0xc78, 0xf50b0001},
+ {0xc78, 0xf40c0001}, {0xc78, 0xf30d0001},
+ {0xc78, 0xf20e0001}, {0xc78, 0xf10f0001},
+ {0xc78, 0xf0100001}, {0xc78, 0xef110001},
+ {0xc78, 0xee120001}, {0xc78, 0xed130001},
+ {0xc78, 0xec140001}, {0xc78, 0xeb150001},
+ {0xc78, 0xea160001}, {0xc78, 0xe9170001},
+ {0xc78, 0xe8180001}, {0xc78, 0xe7190001},
+ {0xc78, 0xc81a0001}, {0xc78, 0xc71b0001},
+ {0xc78, 0xc61c0001}, {0xc78, 0x071d0001},
+ {0xc78, 0x061e0001}, {0xc78, 0x051f0001},
+ {0xc78, 0x04200001}, {0xc78, 0x03210001},
+ {0xc78, 0xaa220001}, {0xc78, 0xa9230001},
+ {0xc78, 0xa8240001}, {0xc78, 0xa7250001},
+ {0xc78, 0xa6260001}, {0xc78, 0x85270001},
+ {0xc78, 0x84280001}, {0xc78, 0x83290001},
+ {0xc78, 0x252a0001}, {0xc78, 0x242b0001},
+ {0xc78, 0x232c0001}, {0xc78, 0x222d0001},
+ {0xc78, 0x672e0001}, {0xc78, 0x662f0001},
+ {0xc78, 0x65300001}, {0xc78, 0x64310001},
+ {0xc78, 0x63320001}, {0xc78, 0x62330001},
+ {0xc78, 0x61340001}, {0xc78, 0x45350001},
+ {0xc78, 0x44360001}, {0xc78, 0x43370001},
+ {0xc78, 0x42380001}, {0xc78, 0x41390001},
+ {0xc78, 0x403a0001}, {0xc78, 0x403b0001},
+ {0xc78, 0x403c0001}, {0xc78, 0x403d0001},
+ {0xc78, 0x403e0001}, {0xc78, 0x403f0001},
+ {0xc78, 0xfb400001}, {0xc78, 0xfb410001},
+ {0xc78, 0xfb420001}, {0xc78, 0xfb430001},
+ {0xc78, 0xfb440001}, {0xc78, 0xfb450001},
+ {0xc78, 0xfa460001}, {0xc78, 0xf9470001},
+ {0xc78, 0xf8480001}, {0xc78, 0xf7490001},
+ {0xc78, 0xf64a0001}, {0xc78, 0xf54b0001},
+ {0xc78, 0xf44c0001}, {0xc78, 0xf34d0001},
+ {0xc78, 0xf24e0001}, {0xc78, 0xf14f0001},
+ {0xc78, 0xf0500001}, {0xc78, 0xef510001},
+ {0xc78, 0xee520001}, {0xc78, 0xed530001},
+ {0xc78, 0xec540001}, {0xc78, 0xeb550001},
+ {0xc78, 0xea560001}, {0xc78, 0xe9570001},
+ {0xc78, 0xe8580001}, {0xc78, 0xe7590001},
+ {0xc78, 0xe65a0001}, {0xc78, 0xe55b0001},
+ {0xc78, 0xe45c0001}, {0xc78, 0xe35d0001},
+ {0xc78, 0xe25e0001}, {0xc78, 0xe15f0001},
+ {0xc78, 0x8a600001}, {0xc78, 0x89610001},
+ {0xc78, 0x88620001}, {0xc78, 0x87630001},
+ {0xc78, 0x86640001}, {0xc78, 0x85650001},
+ {0xc78, 0x84660001}, {0xc78, 0x83670001},
+ {0xc78, 0x82680001}, {0xc78, 0x6b690001},
+ {0xc78, 0x6a6a0001}, {0xc78, 0x696b0001},
+ {0xc78, 0x686c0001}, {0xc78, 0x676d0001},
+ {0xc78, 0x666e0001}, {0xc78, 0x656f0001},
+ {0xc78, 0x64700001}, {0xc78, 0x63710001},
+ {0xc78, 0x62720001}, {0xc78, 0x61730001},
+ {0xc78, 0x49740001}, {0xc78, 0x48750001},
+ {0xc78, 0x47760001}, {0xc78, 0x46770001},
+ {0xc78, 0x45780001}, {0xc78, 0x44790001},
+ {0xc78, 0x437a0001}, {0xc78, 0x427b0001},
+ {0xc78, 0x417c0001}, {0xc78, 0x407d0001},
+ {0xc78, 0x407e0001}, {0xc78, 0x407f0001},
+ {0xc50, 0x00040022}, {0xc50, 0x00040020},
+ {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8192eu_highpa_table[] = {
+ {0xc78, 0xfa000001}, {0xc78, 0xf9010001},
+ {0xc78, 0xf8020001}, {0xc78, 0xf7030001},
+ {0xc78, 0xf6040001}, {0xc78, 0xf5050001},
+ {0xc78, 0xf4060001}, {0xc78, 0xf3070001},
+ {0xc78, 0xf2080001}, {0xc78, 0xf1090001},
+ {0xc78, 0xf00a0001}, {0xc78, 0xef0b0001},
+ {0xc78, 0xee0c0001}, {0xc78, 0xed0d0001},
+ {0xc78, 0xec0e0001}, {0xc78, 0xeb0f0001},
+ {0xc78, 0xea100001}, {0xc78, 0xe9110001},
+ {0xc78, 0xe8120001}, {0xc78, 0xe7130001},
+ {0xc78, 0xe6140001}, {0xc78, 0xe5150001},
+ {0xc78, 0xe4160001}, {0xc78, 0xe3170001},
+ {0xc78, 0xe2180001}, {0xc78, 0xe1190001},
+ {0xc78, 0x8a1a0001}, {0xc78, 0x891b0001},
+ {0xc78, 0x881c0001}, {0xc78, 0x871d0001},
+ {0xc78, 0x861e0001}, {0xc78, 0x851f0001},
+ {0xc78, 0x84200001}, {0xc78, 0x83210001},
+ {0xc78, 0x82220001}, {0xc78, 0x6a230001},
+ {0xc78, 0x69240001}, {0xc78, 0x68250001},
+ {0xc78, 0x67260001}, {0xc78, 0x66270001},
+ {0xc78, 0x65280001}, {0xc78, 0x64290001},
+ {0xc78, 0x632a0001}, {0xc78, 0x622b0001},
+ {0xc78, 0x612c0001}, {0xc78, 0x602d0001},
+ {0xc78, 0x472e0001}, {0xc78, 0x462f0001},
+ {0xc78, 0x45300001}, {0xc78, 0x44310001},
+ {0xc78, 0x43320001}, {0xc78, 0x42330001},
+ {0xc78, 0x41340001}, {0xc78, 0x40350001},
+ {0xc78, 0x40360001}, {0xc78, 0x40370001},
+ {0xc78, 0x40380001}, {0xc78, 0x40390001},
+ {0xc78, 0x403a0001}, {0xc78, 0x403b0001},
+ {0xc78, 0x403c0001}, {0xc78, 0x403d0001},
+ {0xc78, 0x403e0001}, {0xc78, 0x403f0001},
+ {0xc78, 0xfa400001}, {0xc78, 0xf9410001},
+ {0xc78, 0xf8420001}, {0xc78, 0xf7430001},
+ {0xc78, 0xf6440001}, {0xc78, 0xf5450001},
+ {0xc78, 0xf4460001}, {0xc78, 0xf3470001},
+ {0xc78, 0xf2480001}, {0xc78, 0xf1490001},
+ {0xc78, 0xf04a0001}, {0xc78, 0xef4b0001},
+ {0xc78, 0xee4c0001}, {0xc78, 0xed4d0001},
+ {0xc78, 0xec4e0001}, {0xc78, 0xeb4f0001},
+ {0xc78, 0xea500001}, {0xc78, 0xe9510001},
+ {0xc78, 0xe8520001}, {0xc78, 0xe7530001},
+ {0xc78, 0xe6540001}, {0xc78, 0xe5550001},
+ {0xc78, 0xe4560001}, {0xc78, 0xe3570001},
+ {0xc78, 0xe2580001}, {0xc78, 0xe1590001},
+ {0xc78, 0x8a5a0001}, {0xc78, 0x895b0001},
+ {0xc78, 0x885c0001}, {0xc78, 0x875d0001},
+ {0xc78, 0x865e0001}, {0xc78, 0x855f0001},
+ {0xc78, 0x84600001}, {0xc78, 0x83610001},
+ {0xc78, 0x82620001}, {0xc78, 0x6a630001},
+ {0xc78, 0x69640001}, {0xc78, 0x68650001},
+ {0xc78, 0x67660001}, {0xc78, 0x66670001},
+ {0xc78, 0x65680001}, {0xc78, 0x64690001},
+ {0xc78, 0x636a0001}, {0xc78, 0x626b0001},
+ {0xc78, 0x616c0001}, {0xc78, 0x606d0001},
+ {0xc78, 0x476e0001}, {0xc78, 0x466f0001},
+ {0xc78, 0x45700001}, {0xc78, 0x44710001},
+ {0xc78, 0x43720001}, {0xc78, 0x42730001},
+ {0xc78, 0x41740001}, {0xc78, 0x40750001},
+ {0xc78, 0x40760001}, {0xc78, 0x40770001},
+ {0xc78, 0x40780001}, {0xc78, 0x40790001},
+ {0xc78, 0x407a0001}, {0xc78, 0x407b0001},
+ {0xc78, 0x407c0001}, {0xc78, 0x407d0001},
+ {0xc78, 0x407e0001}, {0xc78, 0x407f0001},
+ {0xc50, 0x00040222}, {0xc50, 0x00040220},
+ {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192eu_radioa_init_table[] = {
+ {0x7f, 0x00000082}, {0x81, 0x0003fc00},
+ {0x00, 0x00030000}, {0x08, 0x00008400},
+ {0x18, 0x00000407}, {0x19, 0x00000012},
+ {0x1b, 0x00000064}, {0x1e, 0x00080009},
+ {0x1f, 0x00000880}, {0x2f, 0x0001a060},
+ {0x3f, 0x00000000}, {0x42, 0x000060c0},
+ {0x57, 0x000d0000}, {0x58, 0x000be180},
+ {0x67, 0x00001552}, {0x83, 0x00000000},
+ {0xb0, 0x000ff9f1}, {0xb1, 0x00055418},
+ {0xb2, 0x0008cc00}, {0xb4, 0x00043083},
+ {0xb5, 0x00008166}, {0xb6, 0x0000803e},
+ {0xb7, 0x0001c69f}, {0xb8, 0x0000407f},
+ {0xb9, 0x00080001}, {0xba, 0x00040001},
+ {0xbb, 0x00000400}, {0xbf, 0x000c0000},
+ {0xc2, 0x00002400}, {0xc3, 0x00000009},
+ {0xc4, 0x00040c91}, {0xc5, 0x00099999},
+ {0xc6, 0x000000a3}, {0xc7, 0x00088820},
+ {0xc8, 0x00076c06}, {0xc9, 0x00000000},
+ {0xca, 0x00080000}, {0xdf, 0x00000180},
+ {0xef, 0x000001a0}, {0x51, 0x00069545},
+ {0x52, 0x0007e45e}, {0x53, 0x00000071},
+ {0x56, 0x00051ff3}, {0x35, 0x000000a8},
+ {0x35, 0x000001e2}, {0x35, 0x000002a8},
+ {0x36, 0x00001c24}, {0x36, 0x00009c24},
+ {0x36, 0x00011c24}, {0x36, 0x00019c24},
+ {0x18, 0x00000c07}, {0x5a, 0x00048000},
+ {0x19, 0x000739d0},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x34, 0x0000a093}, {0x34, 0x0000908f},
+ {0x34, 0x0000808c}, {0x34, 0x0000704d},
+ {0x34, 0x0000604a}, {0x34, 0x00005047},
+ {0x34, 0x0000400a}, {0x34, 0x00003007},
+ {0x34, 0x00002004}, {0x34, 0x00001001},
+ {0x34, 0x00000000},
+#else
+ /* Regular */
+ {0x34, 0x0000add7}, {0x34, 0x00009dd4},
+ {0x34, 0x00008dd1}, {0x34, 0x00007dce},
+ {0x34, 0x00006dcb}, {0x34, 0x00005dc8},
+ {0x34, 0x00004dc5}, {0x34, 0x000034cc},
+ {0x34, 0x0000244f}, {0x34, 0x0000144c},
+ {0x34, 0x00000014},
+#endif
+ {0x00, 0x00030159},
+ {0x84, 0x00068180},
+ {0x86, 0x0000014e},
+ {0x87, 0x00048e00},
+ {0x8e, 0x00065540},
+ {0x8f, 0x00088000},
+ {0xef, 0x000020a0},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x3b, 0x000f07b0},
+#else
+ {0x3b, 0x000f02b0},
+#endif
+ {0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0},
+ {0x3b, 0x000cf060}, {0x3b, 0x000b0090},
+ {0x3b, 0x000a0080}, {0x3b, 0x00090080},
+ {0x3b, 0x0008f780},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x3b, 0x000787b0},
+#else
+ {0x3b, 0x00078730},
+#endif
+ {0x3b, 0x00060fb0}, {0x3b, 0x0005ffa0},
+ {0x3b, 0x00040620}, {0x3b, 0x00037090},
+ {0x3b, 0x00020080}, {0x3b, 0x0001f060},
+ {0x3b, 0x0000ffb0}, {0xef, 0x000000a0},
+ {0xfe, 0x00000000}, {0x18, 0x0000fc07},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00000001}, {0x1f, 0x00080000},
+ {0x00, 0x00033e70},
+ {0xff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8192eu_radiob_init_table[] = {
+ {0x7f, 0x00000082}, {0x81, 0x0003fc00},
+ {0x00, 0x00030000}, {0x08, 0x00008400},
+ {0x18, 0x00000407}, {0x19, 0x00000012},
+ {0x1b, 0x00000064}, {0x1e, 0x00080009},
+ {0x1f, 0x00000880}, {0x2f, 0x0001a060},
+ {0x3f, 0x00000000}, {0x42, 0x000060c0},
+ {0x57, 0x000d0000}, {0x58, 0x000be180},
+ {0x67, 0x00001552}, {0x7f, 0x00000082},
+ {0x81, 0x0003f000}, {0x83, 0x00000000},
+ {0xdf, 0x00000180}, {0xef, 0x000001a0},
+ {0x51, 0x00069545}, {0x52, 0x0007e42e},
+ {0x53, 0x00000071}, {0x56, 0x00051ff3},
+ {0x35, 0x000000a8}, {0x35, 0x000001e0},
+ {0x35, 0x000002a8}, {0x36, 0x00001ca8},
+ {0x36, 0x00009c24}, {0x36, 0x00011c24},
+ {0x36, 0x00019c24}, {0x18, 0x00000c07},
+ {0x5a, 0x00048000}, {0x19, 0x000739d0},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x34, 0x0000a093}, {0x34, 0x0000908f},
+ {0x34, 0x0000808c}, {0x34, 0x0000704d},
+ {0x34, 0x0000604a}, {0x34, 0x00005047},
+ {0x34, 0x0000400a}, {0x34, 0x00003007},
+ {0x34, 0x00002004}, {0x34, 0x00001001},
+ {0x34, 0x00000000},
+#else
+ {0x34, 0x0000add7}, {0x34, 0x00009dd4},
+ {0x34, 0x00008dd1}, {0x34, 0x00007dce},
+ {0x34, 0x00006dcb}, {0x34, 0x00005dc8},
+ {0x34, 0x00004dc5}, {0x34, 0x000034cc},
+ {0x34, 0x0000244f}, {0x34, 0x0000144c},
+ {0x34, 0x00000014},
+#endif
+ {0x00, 0x00030159}, {0x84, 0x00068180},
+ {0x86, 0x000000ce}, {0x87, 0x00048a00},
+ {0x8e, 0x00065540}, {0x8f, 0x00088000},
+ {0xef, 0x000020a0},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x3b, 0x000f07b0},
+#else
+ {0x3b, 0x000f02b0},
+#endif
+ {0x3b, 0x000ef7b0}, {0x3b, 0x000d4fb0},
+ {0x3b, 0x000cf060}, {0x3b, 0x000b0090},
+ {0x3b, 0x000a0080}, {0x3b, 0x00090080},
+ {0x3b, 0x0008f780},
+#ifdef EXT_PA_8192EU
+ /* External PA or external LNA */
+ {0x3b, 0x000787b0},
+#else
+ {0x3b, 0x00078730},
+#endif
+ {0x3b, 0x00060fb0}, {0x3b, 0x0005ffa0},
+ {0x3b, 0x00040620}, {0x3b, 0x00037090},
+ {0x3b, 0x00020080}, {0x3b, 0x0001f060},
+ {0x3b, 0x0000ffb0}, {0xef, 0x000000a0},
+ {0x00, 0x00010159}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1e, 0x00000001},
+ {0x1f, 0x00080000}, {0x00, 0x00033e70},
+ {0xff, 0xffffffff}
+};
+
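+/*
+ * Program TX power for the given channel: the CCK, OFDM and MCS0-15
+ * rate groups are written to the path A TX AGC registers, and to the
+ * path B registers when a second TX path is present.
+ */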
+static void
+rtl8192e_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+ u32 val32, ofdm, mcs;
+ u8 cck, ofdmbase, mcsbase;
+ int group, tx_idx;
+
+ tx_idx = 0;
+ group = rtl8xxxu_gen2_channel_to_group(channel);
+
+ cck = priv->cck_tx_power_index_A[group];
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+ val32 &= 0xffff00ff;
+ val32 |= (cck << 8);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+ val32 &= 0xff;
+ val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+ ofdmbase = priv->ht40_1s_tx_power_index_A[group];
+ ofdmbase += priv->ofdm_tx_power_diff[tx_idx].a;
+ ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm);
+
+ mcsbase = priv->ht40_1s_tx_power_index_A[group];
+ if (ht40)
+ mcsbase += priv->ht40_tx_power_diff[tx_idx++].a;
+ else
+ mcsbase += priv->ht20_tx_power_diff[tx_idx++].a;
+ mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs);
+
+ if (priv->tx_paths > 1) {
+ cck = priv->cck_tx_power_index_B[group];
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK1_55_MCS32);
+ val32 &= 0xff;
+ val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK1_55_MCS32, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+ val32 &= 0xffffff00;
+ val32 |= cck;
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+ ofdmbase = priv->ht40_1s_tx_power_index_B[group];
+ ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b;
+ ofdm = ofdmbase | ofdmbase << 8 |
+ ofdmbase << 16 | ofdmbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm);
+
+ mcsbase = priv->ht40_1s_tx_power_index_B[group];
+ if (ht40)
+ mcsbase += priv->ht40_tx_power_diff[tx_idx++].b;
+ else
+ mcsbase += priv->ht20_tx_power_diff[tx_idx++].b;
+ mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs);
+ }
+}
+
+static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
+ int i;
+
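+	/* Sanity check the efuse ID word before trusting the rest of the map */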
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
+ sizeof(efuse->tx_power_index_A.cck_base));
+ memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base,
+ sizeof(efuse->tx_power_index_B.cck_base));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->tx_power_index_A.ht40_base,
+ sizeof(efuse->tx_power_index_A.ht40_base));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ efuse->tx_power_index_B.ht40_base,
+ sizeof(efuse->tx_power_index_B.ht40_base));
+
+ priv->ht20_tx_power_diff[0].a =
+ efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
+ priv->ht20_tx_power_diff[0].b =
+ efuse->tx_power_index_B.ht20_ofdm_1s_diff.b;
+
+ priv->ht40_tx_power_diff[0].a = 0;
+ priv->ht40_tx_power_diff[0].b = 0;
+
+ for (i = 1; i < RTL8723B_TX_COUNT; i++) {
+ priv->ofdm_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ofdm;
+ priv->ofdm_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ofdm;
+
+ priv->ht20_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ht20;
+ priv->ht20_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ht20;
+
+ priv->ht40_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ht40;
+ priv->ht40_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
+ }
+
+ priv->has_xtalk = 1;
+	priv->xtalk = efuse->xtal_k & 0x3f;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
+ dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ unsigned char *raw = priv->efuse_wifi.raw;
+
+ dev_info(&priv->udev->dev,
+ "%s: dumping efuse (0x%02zx bytes):\n",
+ __func__, sizeof(struct rtl8192eu_efuse));
+ for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8) {
+ dev_info(&priv->udev->dev, "%02x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ raw[i], raw[i + 1], raw[i + 2],
+ raw[i + 3], raw[i + 4], raw[i + 5],
+ raw[i + 6], raw[i + 7]);
+ }
+ }
+ return 0;
+}
+
+static int rtl8192eu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ fw_name = "/*(DEBLOBBED)*/";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+
+ return ret;
+}
+
+static void rtl8192eu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* 6. 0x1f[7:0] = 0x07 */
+ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= (SYS_FUNC_USBA | SYS_FUNC_USBD | SYS_FUNC_DIO_RF |
+ SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB);
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+ rtl8xxxu_init_phy_regs(priv, rtl8192eu_phy_init_table);
+
+ if (priv->hi_pa)
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8192eu_highpa_table);
+ else
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8192eu_std_table);
+}
+
+static int rtl8192eu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ int ret;
+
+ ret = rtl8xxxu_init_phy_rf(priv, rtl8192eu_radioa_init_table, RF_A);
+ if (ret)
+ goto exit;
+
+ ret = rtl8xxxu_init_phy_rf(priv, rtl8192eu_radiob_init_table, RF_B);
+
+exit:
+ return ret;
+}
+
+static int rtl8192eu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_e94, reg_e9c;
+ int result = 0;
+
+ /*
+ * TX IQK
+ * PA/PAD controlled by 0x0
+ */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00180);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* Path A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82140303);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x68160000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+	/* Check for failure */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+
+ return result;
+}
+
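+/*
+ * RX IQ calibration for path A: a TX IQK pass is run first, and its
+ * result is programmed back via REG_TX_IQK before the RX path itself
+ * is measured.
+ */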
+static int rtl8192eu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_ea4, reg_eac, reg_e94, reg_e9c, val32;
+ int result = 0;
+
+ /* Leave IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00);
+
+ /* Enable path A PA in TX IQK mode */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf117b);
+
+ /* PA/PAD control by 0x56, and set = 0x0 */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00980);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000);
+
+ /* Enter IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* TX IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	/* Path A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x68160c1f);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+	/* Check for failure */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000)) {
+ result |= 0x01;
+ } else {
+ /* PA/PAD controlled by 0x0 */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+ goto out;
+ }
+
+ val32 = 0x80007c00 |
+ (reg_e94 & 0x03ff0000) | ((reg_e9c >> 16) & 0x03ff);
+ rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+ /* Modify RX IQK mode table */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7ffa);
+
+ /* PA/PAD control by 0x56, and set = 0x0 */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x00980);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_56, 0x51000);
+
+ /* Enter IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* IQK setting */
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* Path A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c1f);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a891);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x180);
+
+ if (!(reg_eac & BIT(27)) &&
+ ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_eac & 0x03ff0000) != 0x00360000))
+ result |= 0x02;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path A RX IQK failed!\n",
+ __func__);
+
+out:
+ return result;
+}
+
+static int rtl8192eu_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_eb4, reg_ebc;
+ int result = 0;
+
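+	/*
+	 * TX IQK
+	 * PA/PAD controlled by 0x0
+	 */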
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00180);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* Path B IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x821403e2);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x68160000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00492911);
+
+	/* One shot, path B LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+	/* Check for failure */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+
+ if (!(reg_eac & BIT(31)) &&
+ ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+ ((reg_ebc & 0x03ff0000) != 0x00420000))
+ result |= 0x01;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path B IQK failed!\n",
+ __func__);
+
+ return result;
+}
+
+static int rtl8192eu_rx_iqk_path_b(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, val32;
+ int result = 0;
+
+ /* Leave IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+	/* Enable path B PA in TX IQK mode */
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G2, 0xf117b);
+
+ /* PA/PAD control by 0x56, and set = 0x0 */
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00980);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_56, 0x51000);
+
+ /* Enter IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* TX IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	/* Path B IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82160c1f);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x68160c1f);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+	/* One shot, path B LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+	/* Check for failure */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+
+ if (!(reg_eac & BIT(31)) &&
+ ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
+ ((reg_ebc & 0x03ff0000) != 0x00420000)) {
+ result |= 0x01;
+ } else {
+ /*
+ * PA/PAD controlled by 0x0
+		 * Vendor driver restores RF_A here, which I believe is a bug.
+ */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x180);
+ goto out;
+ }
+
+ val32 = 0x80007c00 |
+ (reg_eb4 & 0x03ff0000) | ((reg_ebc >> 16) & 0x03ff);
+ rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+ /* Modify RX IQK mode table */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_WE_LUT, 0x800a0);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G1, 0x0000f);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_TXPA_G2, 0xf7ffa);
+
+ /* PA/PAD control by 0x56, and set = 0x0 */
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x00980);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_56, 0x51000);
+
+ /* Enter IQK mode */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* IQK setting */
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+	/* Path B IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x18008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160c1f);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28160c1f);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a891);
+
+	/* One shot, path B LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xfa000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(10);
+
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
+ reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_UNKNOWN_DF, 0x180);
+
+ if (!(reg_eac & BIT(30)) &&
+ ((reg_ec4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_ecc & 0x03ff0000) != 0x00360000))
+ result |= 0x02;
+ else
+ dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
+ __func__);
+
+out:
+ return result;
+}
+
+static void rtl8192eu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+ int result[][8], int t)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 i, val32;
+ int path_a_ok, path_b_ok;
+ int retry = 2;
+ const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+ REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+ REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+ REG_TX_OFDM_BBON, REG_TX_TO_RX,
+ REG_TX_TO_TX, REG_RX_CCK,
+ REG_RX_OFDM, REG_RX_WAIT_RIFS,
+ REG_RX_TO_RX, REG_STANDBY,
+ REG_SLEEP, REG_PMPD_ANAEN
+ };
+ const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ REG_TXPAUSE, REG_BEACON_CTRL,
+ REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+ };
+ const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+ REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+ REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+ REG_FPGA0_XB_RF_INT_OE, REG_CCK0_AFE_SETTING
+ };
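+	/* Save the RX initial gain; it is restored after the final pass */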
+ u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff;
+ u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff;
+
+ /*
+ * Note: IQ calibration must be performed after loading
+	 * PHY_REG.txt, radio_a.txt and radio_b.txt.
+ */
+
+ if (t == 0) {
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+ rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+ rtl8xxxu_save_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+ }
+
+ rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+ /* MAC settings */
+ rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+ val32 |= 0x0f000000;
+ rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+ rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22208200);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+ val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
+ val32 |= BIT(10);
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, val32);
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XB_RF_INT_OE);
+ val32 |= BIT(10);
+ rtl8xxxu_write32(priv, REG_FPGA0_XB_RF_INT_OE, val32);
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8192eu_iqk_path_a(priv);
+ if (path_a_ok == 0x01) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_BEFORE_IQK_A);
+ result[t][0] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_AFTER_IQK_A);
+ result[t][1] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8192eu_rx_iqk_path_a(priv);
+ if (path_a_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_A_2);
+ result[t][2] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_A_2);
+ result[t][3] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
+
+ if (priv->rf_paths > 1) {
+ /* Path A into standby */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000);
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+
+ /* Turn Path B ADDA on */
+ rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x80800000);
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ for (i = 0; i < retry; i++) {
+ path_b_ok = rtl8192eu_iqk_path_b(priv);
+ if (path_b_ok == 0x01) {
+				val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_BEFORE_IQK_B);
+				result[t][4] = (val32 >> 16) & 0x3ff;
+				val32 = rtl8xxxu_read32(priv,
+						REG_TX_POWER_AFTER_IQK_B);
+				result[t][5] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_b_ok = rtl8192eu_rx_iqk_path_b(priv);
+ if (path_b_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_B_2);
+ result[t][6] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_B_2);
+ result[t][7] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__);
+ }
+
+ /* Back to BB mode, load original value */
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, 0x00000000);
+
+ if (t) {
+ /* Reload ADDA power saving parameters */
+ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+
+ /* Reload MAC parameters */
+ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+ /* Reload BB parameters */
+ rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+
+ /* Restore RX initial gain */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+ val32 &= 0xffffff00;
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50);
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc);
+
+ if (priv->rf_paths > 1) {
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1);
+ val32 &= 0xffffff00;
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+ val32 | 0x50);
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+ val32 | xb_agc);
+ }
+
+ /* Load 0xe30 IQC default value */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+ }
+}
+
+static void rtl8192eu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int result[4][8]; /* last is final result */
+ int i, candidate;
+ bool path_a_ok, path_b_ok;
+ u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+ u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ bool simu;
+
+ memset(result, 0, sizeof(result));
+ candidate = -1;
+
+ path_a_ok = false;
+ path_b_ok = false;
+
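+	/*
+	 * Run the calibration up to three times and pick the first pair of
+	 * passes whose results agree.
+	 */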
+ for (i = 0; i < 3; i++) {
+ rtl8192eu_phy_iqcalibrate(priv, result, i);
+
+ if (i == 1) {
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 0, 1);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 0, 2);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 1, 2);
+ if (simu)
+ candidate = 1;
+ else
+ candidate = 3;
+ }
+ }
+
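+	/*
+	 * This loop only leaves the values of the final pass in the reg_*
+	 * variables; they are replaced below whenever a candidate was found.
+	 */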
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+
+ if (candidate >= 0) {
+ reg_e94 = result[candidate][0];
+ priv->rege94 = reg_e94;
+ reg_e9c = result[candidate][1];
+ priv->rege9c = reg_e9c;
+ reg_ea4 = result[candidate][2];
+ reg_eac = result[candidate][3];
+ reg_eb4 = result[candidate][4];
+ priv->regeb4 = reg_eb4;
+ reg_ebc = result[candidate][5];
+ priv->regebc = reg_ebc;
+ reg_ec4 = result[candidate][6];
+ reg_ecc = result[candidate][7];
+ dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+ dev_dbg(dev,
+ "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
+ "ecc=%x\n ", __func__, reg_e94, reg_e9c,
+ reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+ path_a_ok = true;
+ path_b_ok = true;
+ } else {
+ reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+ reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+ }
+
+ if (reg_e94 && candidate >= 0)
+ rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+ candidate, (reg_ea4 == 0));
+
+ if (priv->rf_paths > 1)
+ rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+ candidate, (reg_ec4 == 0));
+
+ rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
+ priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+}
+
+/*
+ * This is needed for the 8723bu as well, presumably.
+ */
+static void rtl8192e_crystal_afe_adjust(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+
+ /*
+	 * 40MHz crystal source, MAC 0x28[2] = 0
+ */
+ val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+ val8 &= 0xfb;
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+
+ val32 = rtl8xxxu_read32(priv, REG_AFE_CTRL4);
+ val32 &= 0xfffffc7f;
+ rtl8xxxu_write32(priv, REG_AFE_CTRL4, val32);
+
+ /*
+ * 92e AFE parameter
+ * AFE PLL KVCO selection, MAC 0x28[6]=1
+ */
+ val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+ val8 &= 0xbf;
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+
+ /*
+ * AFE PLL KVCO selection, MAC 0x78[21]=0
+ */
+ val32 = rtl8xxxu_read32(priv, REG_AFE_CTRL4);
+ val32 &= 0xffdfffff;
+ rtl8xxxu_write32(priv, REG_AFE_CTRL4, val32);
+}
+
+static void rtl8192e_disabled_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+
+	/* Clear suspend enable and power down enable */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+}
+
+static int rtl8192e_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+	/* disable HWPDN 0x04[15] = 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(7);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* disable SW LPS 0x04[10] = 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(2);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* disable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* wait till 0x04[17] = 1 power ready */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+	/* release WLON reset 0x04[16] = 1 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+ /* set, then poll until 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u16 val16;
+ u32 val32;
+ int ret;
+
+ ret = 0;
+
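+	/* Select LDO vs. switching regulator based on the SPS_LDO_SEL strap */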
+ val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
+ if (val32 & SYS_CFG_SPS_LDO_SEL) {
+ rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0xc3);
+ } else {
+ /*
+ * Raise 1.2V voltage
+ */
+ val32 = rtl8xxxu_read32(priv, REG_8192E_LDOV12_CTRL);
+ val32 &= 0xff0fffff;
+ val32 |= 0x00500000;
+ rtl8xxxu_write32(priv, REG_8192E_LDOV12_CTRL, val32);
+ rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0x83);
+ }
+
+ /*
+ * Adjust AFE before enabling PLL
+ */
+ rtl8192e_crystal_afe_adjust(priv);
+ rtl8192e_disabled_to_emu(priv);
+
+ ret = rtl8192e_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ rtl8xxxu_write16(priv, REG_CR, 0x0000);
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+exit:
+ return ret;
+}
+
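+/*
+ * Bring up the RF front end: hand WLAN activity control to the PTA,
+ * enable the RFE control pins and set the external antenna switch to
+ * Main->S1, Aux->S0.
+ */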
+static void rtl8192e_enable_rf(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+ u8 val8;
+
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
+ val8 |= BIT(5);
+ rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
+
+ /*
+ * WLAN action by PTA
+ */
+ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+
+ val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
+ val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
+ val32 |= (BIT(0) | BIT(1));
+ rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
+
+ rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 &= ~BIT(24);
+ val32 |= BIT(23);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ /*
+ * Fix external switch Main->S1, Aux->S0
+ */
+ val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+}
+
+struct rtl8xxxu_fileops rtl8192eu_fops = {
+ .parse_efuse = rtl8192eu_parse_efuse,
+ .load_firmware = rtl8192eu_load_firmware,
+ .power_on = rtl8192eu_power_on,
+ .power_off = rtl8xxxu_power_off,
+ .reset_8051 = rtl8xxxu_reset_8051,
+ .llt_init = rtl8xxxu_auto_llt_table,
+ .init_phy_bb = rtl8192eu_init_phy_bb,
+ .init_phy_rf = rtl8192eu_init_phy_rf,
+ .phy_iq_calibrate = rtl8192eu_phy_iq_calibrate,
+ .config_channel = rtl8xxxu_gen2_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc24,
+ .enable_rf = rtl8192e_enable_rf,
+ .disable_rf = rtl8xxxu_gen2_disable_rf,
+ .usb_quirks = rtl8xxxu_gen2_usb_quirks,
+ .set_tx_power = rtl8192e_set_tx_power,
+ .update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
+ .report_connect = rtl8xxxu_gen2_report_connect,
+ .writeN_block_size = 128,
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
+ .has_s0s1 = 0,
+ .adda_1t_init = 0x0fc01616,
+ .adda_1t_path_on = 0x0fc01616,
+ .adda_2t_path_on_a = 0x0fc01616,
+ .adda_2t_path_on_b = 0x0fc01616,
+ .trxff_boundary = 0x3cff,
+ .mactable = rtl8192e_mac_init_table,
+ .total_page_num = TX_TOTAL_PAGE_NUM_8192E,
+ .page_num_hi = TX_PAGE_NUM_HI_PQ_8192E,
+ .page_num_lo = TX_PAGE_NUM_LO_PQ_8192E,
+ .page_num_norm = TX_PAGE_NUM_NORM_PQ_8192E,
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
new file mode 100644
index 000000000..cd6bf209d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
@@ -0,0 +1,397 @@
+/*
+ * RTL8XXXU mac80211 USB driver - 8723a specific subdriver
+ *
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
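+/* Baseline TX AGC register values, referenced through priv->power_base */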
+static struct rtl8xxxu_power_base rtl8723a_power_base = {
+ .reg_0e00 = 0x0a0c0c0c,
+ .reg_0e04 = 0x02040608,
+ .reg_0e08 = 0x00000000,
+ .reg_086c = 0x00000000,
+
+ .reg_0e10 = 0x0a0c0d0e,
+ .reg_0e14 = 0x02040608,
+ .reg_0e18 = 0x0a0c0d0e,
+ .reg_0e1c = 0x02040608,
+
+ .reg_0830 = 0x0a0c0c0c,
+ .reg_0834 = 0x02040608,
+ .reg_0838 = 0x00000000,
+ .reg_086c_2 = 0x00000000,
+
+ .reg_083c = 0x0a0c0d0e,
+ .reg_0848 = 0x02040608,
+ .reg_084c = 0x0a0c0d0e,
+ .reg_0868 = 0x02040608,
+};
+
+static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
+ {0x00, 0x00030159}, {0x01, 0x00031284},
+ {0x02, 0x00098000}, {0x03, 0x00039c63},
+ {0x04, 0x000210e7}, {0x09, 0x0002044f},
+ {0x0a, 0x0001a3f1}, {0x0b, 0x00014787},
+ {0x0c, 0x000896fe}, {0x0d, 0x0000e02c},
+ {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
+ {0x19, 0x00000000}, {0x1a, 0x00030355},
+ {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
+ {0x1d, 0x000a1250}, {0x1e, 0x0000024f},
+ {0x1f, 0x00000000}, {0x20, 0x0000b614},
+ {0x21, 0x0006c000}, {0x22, 0x00000000},
+ {0x23, 0x00001558}, {0x24, 0x00000060},
+ {0x25, 0x00000483}, {0x26, 0x0004f000},
+ {0x27, 0x000ec7d9}, {0x28, 0x00057730},
+ {0x29, 0x00004783}, {0x2a, 0x00000001},
+ {0x2b, 0x00021334}, {0x2a, 0x00000000},
+ {0x2b, 0x00000054}, {0x2a, 0x00000001},
+ {0x2b, 0x00000808}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000c}, {0x2a, 0x00000002},
+ {0x2b, 0x00000808}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000003},
+ {0x2b, 0x00000808}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000004},
+ {0x2b, 0x00000808}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000005},
+ {0x2b, 0x00000808}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000006},
+ {0x2b, 0x00000709}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000007},
+ {0x2b, 0x00000709}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000008},
+ {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x00000009},
+ {0x2b, 0x0000060a}, {0x2b, 0x00053333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
+ {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
+ {0x2b, 0x0000060a}, {0x2b, 0x00063333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
+ {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
+ {0x2b, 0x0000060a}, {0x2b, 0x00073333},
+ {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
+ {0x2b, 0x0000050b}, {0x2b, 0x00066666},
+ {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
+ {0x10, 0x0004000f}, {0x11, 0x000e31fc},
+ {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
+ {0x10, 0x0002000f}, {0x11, 0x000203f9},
+ {0x10, 0x0003000f}, {0x11, 0x000ff500},
+ {0x10, 0x00000000}, {0x11, 0x00000000},
+ {0x10, 0x0008000f}, {0x11, 0x0003f100},
+ {0x10, 0x0009000f}, {0x11, 0x00023100},
+ {0x12, 0x00032000}, {0x12, 0x00071000},
+ {0x12, 0x000b0000}, {0x12, 0x000fc000},
+ {0x13, 0x000287b3}, {0x13, 0x000244b7},
+ {0x13, 0x000204ab}, {0x13, 0x0001c49f},
+ {0x13, 0x00018493}, {0x13, 0x0001429b},
+ {0x13, 0x00010299}, {0x13, 0x0000c29c},
+ {0x13, 0x000081a0}, {0x13, 0x000040ac},
+ {0x13, 0x00000020}, {0x14, 0x0001944c},
+ {0x14, 0x00059444}, {0x14, 0x0009944c},
+ {0x14, 0x000d9444}, {0x15, 0x0000f474},
+ {0x15, 0x0004f477}, {0x15, 0x0008f455},
+ {0x15, 0x000cf455}, {0x16, 0x00000339},
+ {0x16, 0x00040339}, {0x16, 0x00080339},
+ {0x16, 0x000c0366}, {0x00, 0x00010159},
+ {0x18, 0x0000f401}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0x1f, 0x00000003},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0x1e, 0x00000247}, {0x1f, 0x00000000},
+ {0x00, 0x00030159},
+ {0xff, 0xffffffff}
+};
+
+static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8723au_efuse *efuse = &priv->efuse_wifi.efuse8723;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A,
+ efuse->cck_tx_power_index_A,
+ sizeof(efuse->cck_tx_power_index_A));
+ memcpy(priv->cck_tx_power_index_B,
+ efuse->cck_tx_power_index_B,
+ sizeof(efuse->cck_tx_power_index_B));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->ht40_1s_tx_power_index_A,
+ sizeof(efuse->ht40_1s_tx_power_index_A));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ efuse->ht40_1s_tx_power_index_B,
+ sizeof(efuse->ht40_1s_tx_power_index_B));
+
+ memcpy(priv->ht20_tx_power_index_diff,
+ efuse->ht20_tx_power_index_diff,
+ sizeof(efuse->ht20_tx_power_index_diff));
+ memcpy(priv->ofdm_tx_power_index_diff,
+ efuse->ofdm_tx_power_index_diff,
+ sizeof(efuse->ofdm_tx_power_index_diff));
+
+ memcpy(priv->ht40_max_power_offset,
+ efuse->ht40_max_power_offset,
+ sizeof(efuse->ht40_max_power_offset));
+ memcpy(priv->ht20_max_power_offset,
+ efuse->ht20_max_power_offset,
+ sizeof(efuse->ht20_max_power_offset));
+
+	if (efuse->version >= 0x01) {
+		priv->has_xtalk = 1;
+		priv->xtalk = efuse->xtal_k & 0x3f;
+	}
+
+ priv->power_base = &rtl8723a_power_base;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n",
+ efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.41s\n",
+ efuse->device_name);
+ return 0;
+}
+
+static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
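+	/* The firmware image depends on the chip cut; cut 1 also has a BT variant */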
+ switch (priv->chip_cut) {
+ case 0:
+ fw_name = "/*(DEBLOBBED)*/";
+ break;
+ case 1:
+ if (priv->enable_bluetooth)
+ fw_name = "/*(DEBLOBBED)*/";
+ else
+ fw_name = "/*(DEBLOBBED)*/";
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+ return ret;
+}
+
+static int rtl8723au_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ int ret;
+
+ ret = rtl8xxxu_init_phy_rf(priv, rtl8723au_radioa_1t_init_table, RF_A);
+
+ /* Reduce 80M spur */
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
+ rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
+
+ return ret;
+}
+
+static int rtl8723a_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+	/* 0x20[0] = 1 enable LDOA12 MACRO block for all interfaces */
+ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+ val8 |= LDOA15_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+	/* 0x67[0] = 0 to disable BT_GPS_SEL pins */
+ val8 = rtl8xxxu_read8(priv, 0x0067);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, 0x0067, val8);
+
+ mdelay(1);
+
+ /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 &= ~SYS_ISO_ANALOG_IPS;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+	/* disable SW LPS 0x04[10] = 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(2);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* wait till 0x04[17] = 1 power ready */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* We should be able to optimize the following three entries into one */
+
+	/* release WLON reset 0x04[16] = 1 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+	/* disable HWPDN 0x04[15] = 0 */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~BIT(7);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* Disable WL suspend */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 &= ~(BIT(3) | BIT(4));
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+	/* Set, then poll until 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* 0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */
+ /*
+	 * Note: the vendor driver actually clears this bit, despite the
+	 * documentation claiming it is set!
+ */
+ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+ val8 |= LEDCFG2_DPDT_SELECT;
+ val8 &= ~LEDCFG2_DPDT_SELECT;
+ rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+
+exit:
+ return ret;
+}
+
+static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int ret;
+
+ /*
+ * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
+ */
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
+
+ rtl8xxxu_disabled_to_emu(priv);
+
+ ret = rtl8723a_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ /*
+ * 0x0004[19] = 1, reset 8051
+ */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
+ val8 |= BIT(3);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ /* For EFuse PG */
+ val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
+ val32 &= ~(BIT(28) | BIT(29) | BIT(30));
+ val32 |= (0x06 << 28);
+ rtl8xxxu_write32(priv, REG_EFUSE_CTRL, val32);
+exit:
+ return ret;
+}
+
+struct rtl8xxxu_fileops rtl8723au_fops = {
+ .parse_efuse = rtl8723au_parse_efuse,
+ .load_firmware = rtl8723au_load_firmware,
+ .power_on = rtl8723au_power_on,
+ .power_off = rtl8xxxu_power_off,
+ .reset_8051 = rtl8xxxu_reset_8051,
+ .llt_init = rtl8xxxu_init_llt_table,
+ .init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
+ .init_phy_rf = rtl8723au_init_phy_rf,
+ .phy_iq_calibrate = rtl8xxxu_gen1_phy_iq_calibrate,
+ .config_channel = rtl8xxxu_gen1_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc16,
+ .enable_rf = rtl8xxxu_gen1_enable_rf,
+ .disable_rf = rtl8xxxu_gen1_disable_rf,
+ .usb_quirks = rtl8xxxu_gen1_usb_quirks,
+ .set_tx_power = rtl8xxxu_gen1_set_tx_power,
+ .update_rate_mask = rtl8xxxu_update_rate_mask,
+ .report_connect = rtl8xxxu_gen1_report_connect,
+ .writeN_block_size = 1024,
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc16),
+ .adda_1t_init = 0x0b1b25a0,
+ .adda_1t_path_on = 0x0bdb25a0,
+ .adda_2t_path_on_a = 0x04db25a4,
+ .adda_2t_path_on_b = 0x0b1b25a4,
+ .trxff_boundary = 0x27ff,
+ .pbp_rx = PBP_PAGE_SIZE_128,
+ .pbp_tx = PBP_PAGE_SIZE_128,
+ .mactable = rtl8xxxu_gen1_mac_init_table,
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
new file mode 100644
index 000000000..6f42f0a16
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
@@ -0,0 +1,1682 @@
+/*
+ * RTL8XXXU mac80211 USB driver - 8723b specific subdriver
+ *
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
+ *
+ * Portions, notably calibration code:
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This driver was written as a replacement for the vendor provided
+ * rtl8723au driver. As the Realtek 8xxx chips are very similar in
+ * their programming interface, I have started adding support for
+ * additional 8xxx chips like the 8192cu, 8188cus, etc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/usb.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/wireless.h>
+#include <linux/firmware.h>
+#include <linux/moduleparam.h>
+#include <net/mac80211.h>
+#include "rtl8xxxu.h"
+#include "rtl8xxxu_regs.h"
+
+static struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
+ {0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0},
+ {0x064, 0x00}, {0x067, 0x20}, {0x428, 0x0a}, {0x429, 0x10},
+ {0x430, 0x00}, {0x431, 0x00},
+ {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+ {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00},
+ {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f},
+ {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00},
+ {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f},
+ {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66},
+ {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+ {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+ {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+ {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+ {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+ {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+ {0x516, 0x0a}, {0x525, 0x4f},
+ {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50},
+ {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+ {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff},
+ {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff},
+ {0x638, 0x50}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
+ {0x63f, 0x0e}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00},
+ {0x652, 0xc8}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
+ {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
+ {0x70a, 0x65}, {0x70b, 0x87}, {0x765, 0x18}, {0x76e, 0x04},
+ {0xffff, 0xff},
+};
+
+static struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = {
+ {0x800, 0x80040000}, {0x804, 0x00000003},
+ {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
+ {0x810, 0x10001331}, {0x814, 0x020c3d10},
+ {0x818, 0x02200385}, {0x81c, 0x00000000},
+ {0x820, 0x01000100}, {0x824, 0x00190204},
+ {0x828, 0x00000000}, {0x82c, 0x00000000},
+ {0x830, 0x00000000}, {0x834, 0x00000000},
+ {0x838, 0x00000000}, {0x83c, 0x00000000},
+ {0x840, 0x00010000}, {0x844, 0x00000000},
+ {0x848, 0x00000000}, {0x84c, 0x00000000},
+ {0x850, 0x00000000}, {0x854, 0x00000000},
+ {0x858, 0x569a11a9}, {0x85c, 0x01000014},
+ {0x860, 0x66f60110}, {0x864, 0x061f0649},
+ {0x868, 0x00000000}, {0x86c, 0x27272700},
+ {0x870, 0x07000760}, {0x874, 0x25004000},
+ {0x878, 0x00000808}, {0x87c, 0x00000000},
+ {0x880, 0xb0000c1c}, {0x884, 0x00000001},
+ {0x888, 0x00000000}, {0x88c, 0xccc000c0},
+ {0x890, 0x00000800}, {0x894, 0xfffffffe},
+ {0x898, 0x40302010}, {0x89c, 0x00706050},
+ {0x900, 0x00000000}, {0x904, 0x00000023},
+ {0x908, 0x00000000}, {0x90c, 0x81121111},
+ {0x910, 0x00000002}, {0x914, 0x00000201},
+ {0xa00, 0x00d047c8}, {0xa04, 0x80ff800c},
+ {0xa08, 0x8c838300}, {0xa0c, 0x2e7f120f},
+ {0xa10, 0x9500bb78}, {0xa14, 0x1114d028},
+ {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
+ {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
+ {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
+ {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
+ {0xa78, 0x00000900}, {0xa7c, 0x225b0606},
+ {0xa80, 0x21806490}, {0xb2c, 0x00000000},
+ {0xc00, 0x48071d40}, {0xc04, 0x03a05611},
+ {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
+ {0xc10, 0x08800000}, {0xc14, 0x40000100},
+ {0xc18, 0x08800000}, {0xc1c, 0x40000100},
+ {0xc20, 0x00000000}, {0xc24, 0x00000000},
+ {0xc28, 0x00000000}, {0xc2c, 0x00000000},
+ {0xc30, 0x69e9ac44}, {0xc34, 0x469652af},
+ {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
+ {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
+ {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
+ {0xc50, 0x69553420}, {0xc54, 0x43bc0094},
+ {0xc58, 0x00013149}, {0xc5c, 0x00250492},
+ {0xc60, 0x00000000}, {0xc64, 0x7112848b},
+ {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
+ {0xc70, 0x2c7f000d}, {0xc74, 0x020610db},
+ {0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
+ {0xc80, 0x390000e4}, {0xc84, 0x20f60000},
+ {0xc88, 0x40000100}, {0xc8c, 0x20200000},
+ {0xc90, 0x00020e1a}, {0xc94, 0x00000000},
+ {0xc98, 0x00020e1a}, {0xc9c, 0x00007f7f},
+ {0xca0, 0x00000000}, {0xca4, 0x000300a0},
+ {0xca8, 0x00000000}, {0xcac, 0x00000000},
+ {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
+ {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
+ {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
+ {0xcc8, 0x00000000}, {0xccc, 0x00000000},
+ {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
+ {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
+ {0xce0, 0x00222222}, {0xce4, 0x00000000},
+ {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
+ {0xd00, 0x00000740}, {0xd04, 0x40020401},
+ {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
+ {0xd10, 0xa0633333}, {0xd14, 0x3333bc53},
+ {0xd18, 0x7a8f5b6f}, {0xd2c, 0xcc979975},
+ {0xd30, 0x00000000}, {0xd34, 0x80608000},
+ {0xd38, 0x00000000}, {0xd3c, 0x00127353},
+ {0xd40, 0x00000000}, {0xd44, 0x00000000},
+ {0xd48, 0x00000000}, {0xd4c, 0x00000000},
+ {0xd50, 0x6437140a}, {0xd54, 0x00000000},
+ {0xd58, 0x00000282}, {0xd5c, 0x30032064},
+ {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
+ {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
+ {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
+ {0xd78, 0x000e3c24}, {0xe00, 0x2d2d2d2d},
+ {0xe04, 0x2d2d2d2d}, {0xe08, 0x0390272d},
+ {0xe10, 0x2d2d2d2d}, {0xe14, 0x2d2d2d2d},
+ {0xe18, 0x2d2d2d2d}, {0xe1c, 0x2d2d2d2d},
+ {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
+ {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
+ {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
+ {0xe44, 0x01004800}, {0xe48, 0xfb000000},
+ {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
+ {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
+ {0xe5c, 0x28160d05}, {0xe60, 0x00000008},
+ {0xe68, 0x001b2556}, {0xe6c, 0x00c00096},
+ {0xe70, 0x00c00096}, {0xe74, 0x01000056},
+ {0xe78, 0x01000014}, {0xe7c, 0x01000056},
+ {0xe80, 0x01000014}, {0xe84, 0x00c00096},
+ {0xe88, 0x01000056}, {0xe8c, 0x00c00096},
+ {0xed0, 0x00c00096}, {0xed4, 0x00c00096},
+ {0xed8, 0x00c00096}, {0xedc, 0x000000d6},
+ {0xee0, 0x000000d6}, {0xeec, 0x01c00016},
+ {0xf14, 0x00000003}, {0xf4c, 0x00000000},
+ {0xf00, 0x00000300},
+ {0x820, 0x01000100}, {0x800, 0x83040000},
+ {0xffff, 0xffffffff},
+};
+
+static struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = {
+ {0xc78, 0xfd000001}, {0xc78, 0xfc010001},
+ {0xc78, 0xfb020001}, {0xc78, 0xfa030001},
+ {0xc78, 0xf9040001}, {0xc78, 0xf8050001},
+ {0xc78, 0xf7060001}, {0xc78, 0xf6070001},
+ {0xc78, 0xf5080001}, {0xc78, 0xf4090001},
+ {0xc78, 0xf30a0001}, {0xc78, 0xf20b0001},
+ {0xc78, 0xf10c0001}, {0xc78, 0xf00d0001},
+ {0xc78, 0xef0e0001}, {0xc78, 0xee0f0001},
+ {0xc78, 0xed100001}, {0xc78, 0xec110001},
+ {0xc78, 0xeb120001}, {0xc78, 0xea130001},
+ {0xc78, 0xe9140001}, {0xc78, 0xe8150001},
+ {0xc78, 0xe7160001}, {0xc78, 0xe6170001},
+ {0xc78, 0xe5180001}, {0xc78, 0xe4190001},
+ {0xc78, 0xe31a0001}, {0xc78, 0xa51b0001},
+ {0xc78, 0xa41c0001}, {0xc78, 0xa31d0001},
+ {0xc78, 0x671e0001}, {0xc78, 0x661f0001},
+ {0xc78, 0x65200001}, {0xc78, 0x64210001},
+ {0xc78, 0x63220001}, {0xc78, 0x4a230001},
+ {0xc78, 0x49240001}, {0xc78, 0x48250001},
+ {0xc78, 0x47260001}, {0xc78, 0x46270001},
+ {0xc78, 0x45280001}, {0xc78, 0x44290001},
+ {0xc78, 0x432a0001}, {0xc78, 0x422b0001},
+ {0xc78, 0x292c0001}, {0xc78, 0x282d0001},
+ {0xc78, 0x272e0001}, {0xc78, 0x262f0001},
+ {0xc78, 0x0a300001}, {0xc78, 0x09310001},
+ {0xc78, 0x08320001}, {0xc78, 0x07330001},
+ {0xc78, 0x06340001}, {0xc78, 0x05350001},
+ {0xc78, 0x04360001}, {0xc78, 0x03370001},
+ {0xc78, 0x02380001}, {0xc78, 0x01390001},
+ {0xc78, 0x013a0001}, {0xc78, 0x013b0001},
+ {0xc78, 0x013c0001}, {0xc78, 0x013d0001},
+ {0xc78, 0x013e0001}, {0xc78, 0x013f0001},
+ {0xc78, 0xfc400001}, {0xc78, 0xfb410001},
+ {0xc78, 0xfa420001}, {0xc78, 0xf9430001},
+ {0xc78, 0xf8440001}, {0xc78, 0xf7450001},
+ {0xc78, 0xf6460001}, {0xc78, 0xf5470001},
+ {0xc78, 0xf4480001}, {0xc78, 0xf3490001},
+ {0xc78, 0xf24a0001}, {0xc78, 0xf14b0001},
+ {0xc78, 0xf04c0001}, {0xc78, 0xef4d0001},
+ {0xc78, 0xee4e0001}, {0xc78, 0xed4f0001},
+ {0xc78, 0xec500001}, {0xc78, 0xeb510001},
+ {0xc78, 0xea520001}, {0xc78, 0xe9530001},
+ {0xc78, 0xe8540001}, {0xc78, 0xe7550001},
+ {0xc78, 0xe6560001}, {0xc78, 0xe5570001},
+ {0xc78, 0xe4580001}, {0xc78, 0xe3590001},
+ {0xc78, 0xa65a0001}, {0xc78, 0xa55b0001},
+ {0xc78, 0xa45c0001}, {0xc78, 0xa35d0001},
+ {0xc78, 0x675e0001}, {0xc78, 0x665f0001},
+ {0xc78, 0x65600001}, {0xc78, 0x64610001},
+ {0xc78, 0x63620001}, {0xc78, 0x62630001},
+ {0xc78, 0x61640001}, {0xc78, 0x48650001},
+ {0xc78, 0x47660001}, {0xc78, 0x46670001},
+ {0xc78, 0x45680001}, {0xc78, 0x44690001},
+ {0xc78, 0x436a0001}, {0xc78, 0x426b0001},
+ {0xc78, 0x286c0001}, {0xc78, 0x276d0001},
+ {0xc78, 0x266e0001}, {0xc78, 0x256f0001},
+ {0xc78, 0x24700001}, {0xc78, 0x09710001},
+ {0xc78, 0x08720001}, {0xc78, 0x07730001},
+ {0xc78, 0x06740001}, {0xc78, 0x05750001},
+ {0xc78, 0x04760001}, {0xc78, 0x03770001},
+ {0xc78, 0x02780001}, {0xc78, 0x01790001},
+ {0xc78, 0x017a0001}, {0xc78, 0x017b0001},
+ {0xc78, 0x017c0001}, {0xc78, 0x017d0001},
+ {0xc78, 0x017e0001}, {0xc78, 0x017f0001},
+ {0xc50, 0x69553422},
+ {0xc50, 0x69553420},
+ {0x824, 0x00390204},
+ {0xffff, 0xffffffff}
+};
+
+static struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = {
+ {0x00, 0x00010000}, {0xb0, 0x000dffe0},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0xb1, 0x00000018},
+ {0xfe, 0x00000000}, {0xfe, 0x00000000},
+ {0xfe, 0x00000000}, {0xb2, 0x00084c00},
+ {0xb5, 0x0000d2cc}, {0xb6, 0x000925aa},
+ {0xb7, 0x00000010}, {0xb8, 0x0000907f},
+ {0x5c, 0x00000002}, {0x7c, 0x00000002},
+ {0x7e, 0x00000005}, {0x8b, 0x0006fc00},
+ {0xb0, 0x000ff9f0}, {0x1c, 0x000739d2},
+ {0x1e, 0x00000000}, {0xdf, 0x00000780},
+ {0x50, 0x00067435},
+ /*
+ * The 8723bu vendor driver indicates that bit 8 should be set in
+	 * 0x51 for package types TFBGA90, TFBGA80, and TFBGA79. However,
+	 * they never actually check the package type and just default
+	 * to not setting it.
+ */
+ {0x51, 0x0006b04e},
+ {0x52, 0x000007d2}, {0x53, 0x00000000},
+ {0x54, 0x00050400}, {0x55, 0x0004026e},
+ {0xdd, 0x0000004c}, {0x70, 0x00067435},
+ /*
+	 * 0x71 has the same package type condition as register 0x51
+ */
+ {0x71, 0x0006b04e},
+ {0x72, 0x000007d2}, {0x73, 0x00000000},
+ {0x74, 0x00050400}, {0x75, 0x0004026e},
+ {0xef, 0x00000100}, {0x34, 0x0000add7},
+ {0x35, 0x00005c00}, {0x34, 0x00009dd4},
+ {0x35, 0x00005000}, {0x34, 0x00008dd1},
+ {0x35, 0x00004400}, {0x34, 0x00007dce},
+ {0x35, 0x00003800}, {0x34, 0x00006cd1},
+ {0x35, 0x00004400}, {0x34, 0x00005cce},
+ {0x35, 0x00003800}, {0x34, 0x000048ce},
+ {0x35, 0x00004400}, {0x34, 0x000034ce},
+ {0x35, 0x00003800}, {0x34, 0x00002451},
+ {0x35, 0x00004400}, {0x34, 0x0000144e},
+ {0x35, 0x00003800}, {0x34, 0x00000051},
+ {0x35, 0x00004400}, {0xef, 0x00000000},
+ {0xef, 0x00000100}, {0xed, 0x00000010},
+ {0x44, 0x0000add7}, {0x44, 0x00009dd4},
+ {0x44, 0x00008dd1}, {0x44, 0x00007dce},
+ {0x44, 0x00006cc1}, {0x44, 0x00005cce},
+ {0x44, 0x000044d1}, {0x44, 0x000034ce},
+ {0x44, 0x00002451}, {0x44, 0x0000144e},
+ {0x44, 0x00000051}, {0xef, 0x00000000},
+ {0xed, 0x00000000}, {0x7f, 0x00020080},
+ {0xef, 0x00002000}, {0x3b, 0x000380ef},
+ {0x3b, 0x000302fe}, {0x3b, 0x00028ce6},
+ {0x3b, 0x000200bc}, {0x3b, 0x000188a5},
+ {0x3b, 0x00010fbc}, {0x3b, 0x00008f71},
+ {0x3b, 0x00000900}, {0xef, 0x00000000},
+ {0xed, 0x00000001}, {0x40, 0x000380ef},
+ {0x40, 0x000302fe}, {0x40, 0x00028ce6},
+ {0x40, 0x000200bc}, {0x40, 0x000188a5},
+ {0x40, 0x00010fbc}, {0x40, 0x00008f71},
+ {0x40, 0x00000900}, {0xed, 0x00000000},
+ {0x82, 0x00080000}, {0x83, 0x00008000},
+ {0x84, 0x00048d80}, {0x85, 0x00068000},
+ {0xa2, 0x00080000}, {0xa3, 0x00008000},
+ {0xa4, 0x00048d80}, {0xa5, 0x00068000},
+ {0xed, 0x00000002}, {0xef, 0x00000002},
+ {0x56, 0x00000032}, {0x76, 0x00000032},
+ {0x01, 0x00000780},
+ {0xff, 0xffffffff}
+};
+
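+/*
+ * Write a BT coexistence register via the firmware H2C interface.
+ * The exchange appears to be two-stage: the first BT_MP_OPER command
+ * latches the value, and a second command with an incremented request
+ * number supplies the register address and triggers the write.
+ */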
+static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data)
+{
+ struct h2c_cmd h2c;
+ int reqnum = 0;
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER;
+ h2c.bt_mp_oper.operreq = 0 | (reqnum << 4);
+ h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE;
+ h2c.bt_mp_oper.data = data;
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
+
+ reqnum++;
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER;
+ h2c.bt_mp_oper.operreq = 0 | (reqnum << 4);
+	h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_ADDR;
+ h2c.bt_mp_oper.addr = reg;
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
+}
+
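+/*
+ * Reset the on-chip 8051 MCU: assert the reset via RSV_CTRL, stop the
+ * CPU by clearing SYS_FUNC_CPU_ENABLE, then release the reset and
+ * re-enable the CPU.
+ */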
+static void rtl8723bu_reset_8051(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 sys_func;
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
+ val8 &= ~BIT(1);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ sys_func &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
+ val8 &= ~BIT(1);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
+
+ sys_func |= SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
+}
+
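+/*
+ * Program per-rate TX power for the current channel. Each 32-bit
+ * TX_AGC register packs four one-byte power indexes, so a single base
+ * value is replicated into all four byte lanes (equivalent to
+ * base * 0x01010101).
+ */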
+static void
+rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+{
+ u32 val32, ofdm, mcs;
+ u8 cck, ofdmbase, mcsbase;
+ int group, tx_idx;
+
+ tx_idx = 0;
+ group = rtl8xxxu_gen2_channel_to_group(channel);
+
+ cck = priv->cck_tx_power_index_B[group];
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
+ val32 &= 0xffff00ff;
+ val32 |= (cck << 8);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
+ val32 &= 0xff;
+ val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
+
+ ofdmbase = priv->ht40_1s_tx_power_index_B[group];
+ ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b;
+ ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm);
+
+ mcsbase = priv->ht40_1s_tx_power_index_B[group];
+ if (ht40)
+ mcsbase += priv->ht40_tx_power_diff[tx_idx++].b;
+ else
+ mcsbase += priv->ht20_tx_power_diff[tx_idx++].b;
+ mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
+}
+
+static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv)
+{
+ struct rtl8723bu_efuse *efuse = &priv->efuse_wifi.efuse8723bu;
+ int i;
+
+ if (efuse->rtl_id != cpu_to_le16(0x8129))
+ return -EINVAL;
+
+ ether_addr_copy(priv->mac_addr, efuse->mac_addr);
+
+ memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
+ sizeof(efuse->tx_power_index_A.cck_base));
+ memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base,
+ sizeof(efuse->tx_power_index_B.cck_base));
+
+ memcpy(priv->ht40_1s_tx_power_index_A,
+ efuse->tx_power_index_A.ht40_base,
+ sizeof(efuse->tx_power_index_A.ht40_base));
+ memcpy(priv->ht40_1s_tx_power_index_B,
+ efuse->tx_power_index_B.ht40_base,
+ sizeof(efuse->tx_power_index_B.ht40_base));
+
+ priv->ofdm_tx_power_diff[0].a =
+ efuse->tx_power_index_A.ht20_ofdm_1s_diff.a;
+ priv->ofdm_tx_power_diff[0].b =
+ efuse->tx_power_index_B.ht20_ofdm_1s_diff.a;
+
+ priv->ht20_tx_power_diff[0].a =
+ efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
+ priv->ht20_tx_power_diff[0].b =
+ efuse->tx_power_index_B.ht20_ofdm_1s_diff.b;
+
+ priv->ht40_tx_power_diff[0].a = 0;
+ priv->ht40_tx_power_diff[0].b = 0;
+
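+	/*
+	 * Entry 0 was filled in above from the 1-stream HT20/OFDM diff
+	 * fields; the remaining entries come from the per-rate-count
+	 * pwr_diff array in the efuse.
+	 */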
+ for (i = 1; i < RTL8723B_TX_COUNT; i++) {
+ priv->ofdm_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ofdm;
+ priv->ofdm_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ofdm;
+
+ priv->ht20_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ht20;
+ priv->ht20_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ht20;
+
+ priv->ht40_tx_power_diff[i].a =
+ efuse->tx_power_index_A.pwr_diff[i - 1].ht40;
+ priv->ht40_tx_power_diff[i].b =
+ efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
+ }
+
+ priv->has_xtalk = 1;
+ priv->xtalk = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f;
+
+ dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
+ dev_info(&priv->udev->dev, "Product: %.41s\n", efuse->device_name);
+
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
+ int i;
+ unsigned char *raw = priv->efuse_wifi.raw;
+
+ dev_info(&priv->udev->dev,
+ "%s: dumping efuse (0x%02zx bytes):\n",
+ __func__, sizeof(struct rtl8723bu_efuse));
+ for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8) {
+ dev_info(&priv->udev->dev, "%02x: "
+ "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
+ raw[i], raw[i + 1], raw[i + 2],
+ raw[i + 3], raw[i + 4], raw[i + 5],
+ raw[i + 6], raw[i + 7]);
+ }
+ }
+
+ return 0;
+}
+
+static int rtl8723bu_load_firmware(struct rtl8xxxu_priv *priv)
+{
+ char *fw_name;
+ int ret;
+
+ if (priv->enable_bluetooth)
+ fw_name = "/*(DEBLOBBED)*/";
+ else
+ fw_name = "/*(DEBLOBBED)*/";
+
+ ret = rtl8xxxu_load_firmware(priv, fw_name);
+ return ret;
+}
+
+static void rtl8723bu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB | SYS_FUNC_DIO_RF;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+
+ /* 6. 0x1f[7:0] = 0x07 */
+ val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
+ rtl8xxxu_write8(priv, REG_RF_CTRL, val8);
+
+ /* Why? */
+ rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3);
+ rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80);
+ rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table);
+
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table);
+}
+
+static int rtl8723bu_init_phy_rf(struct rtl8xxxu_priv *priv)
+{
+ int ret;
+
+ ret = rtl8xxxu_init_phy_rf(priv, rtl8723bu_radioa_1t_init_table, RF_A);
+ /*
+ * PHY LCK
+ */
+ rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01);
+ msleep(200);
+ rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0);
+
+ return ret;
+}
+
+static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ val32 = rtl8xxxu_read32(priv, REG_PAD_CTRL1);
+ val32 &= ~(BIT(20) | BIT(24));
+ rtl8xxxu_write32(priv, REG_PAD_CTRL1, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
+ val32 &= ~BIT(4);
+ rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
+ val32 |= BIT(3);
+ rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 |= BIT(24);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 &= ~BIT(23);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
+ val32 |= (BIT(0) | BIT(1));
+ rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_CTRL_ANTA_SRC);
+ val32 &= 0xffffff00;
+ val32 |= 0x77;
+ rtl8xxxu_write32(priv, REG_RFE_CTRL_ANTA_SRC, val32);
+
+ val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
+ val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
+}
+
+static int rtl8723bu_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_eac, reg_e94, reg_e9c, path_sel, val32;
+ int result = 0;
+
+ path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * Enable path A PA in TX IQK mode
+ */
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0003f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xc7f87);
+
+ /*
+ * Tx IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ea);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * The vendor driver indicates the USB module is always using
+	 * S0S1 path 1 for the 8723bu. This may be different for the 8192eu.
+ */
+ if (priv->rf_paths > 1)
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
+ else
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
+
+ /*
+ * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu.
+ * No trace of this in the 8192eu or 8188eu vendor drivers.
+ */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Restore Ant Path */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
+#ifdef RTL8723BU_BT
+ /* GNT_BT = 1 */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
+#endif
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ val32 = (reg_e9c >> 16) & 0x3ff;
+ if (val32 & 0x200)
+ val32 = 0x400 - val32;
+
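+	/*
+	 * Pass criteria, as far as the magic numbers can be read:
+	 * BIT(28) of 0xeac flags a path A TX failure, the 0xe94
+	 * amplitude must fall in the 0x00f0xxxx..0x0110xxxx window,
+	 * and the signed 10-bit value in 0xe9c[25:16] must have a
+	 * magnitude below 0xf.
+	 */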
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000) &&
+ ((reg_e94 & 0x03ff0000) < 0x01100000) &&
+ ((reg_e94 & 0x03ff0000) > 0x00f00000) &&
+ val32 < 0xf)
+ result |= 0x01;
+ else /* If TX not OK, ignore RX */
+ goto out;
+
+out:
+ return result;
+}
+
+static int rtl8723bu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
+{
+ u32 reg_ea4, reg_eac, reg_e94, reg_e9c, path_sel, val32;
+ int result = 0;
+
+ path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * Enable path A PA in TX IQK mode
+ */
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
+
+ /*
+ * Tx IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160ff0);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /*
+ * The vendor driver indicates the USB module is always using
+	 * S0S1 path 1 for the 8723bu. This may be different for the 8192eu.
+ */
+ if (priv->rf_paths > 1)
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
+ else
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
+
+ /*
+ * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu.
+ * No trace of this in the 8192eu or 8188eu vendor drivers.
+ */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Restore Ant Path */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
+#ifdef RTL8723BU_BT
+ /* GNT_BT = 1 */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
+#endif
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
+ reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
+
+ val32 = (reg_e9c >> 16) & 0x3ff;
+ if (val32 & 0x200)
+ val32 = 0x400 - val32;
+
+ if (!(reg_eac & BIT(28)) &&
+ ((reg_e94 & 0x03ff0000) != 0x01420000) &&
+ ((reg_e9c & 0x03ff0000) != 0x00420000) &&
+ ((reg_e94 & 0x03ff0000) < 0x01100000) &&
+ ((reg_e94 & 0x03ff0000) > 0x00f00000) &&
+ val32 < 0xf)
+ result |= 0x01;
+ else /* If TX not OK, ignore RX */
+ goto out;
+
+	val32 = 0x80007c00 | (reg_e94 & 0x3ff0000) |
+ ((reg_e9c & 0x3ff0000) >> 16);
+ rtl8xxxu_write32(priv, REG_TX_IQK, val32);
+
+ /*
+ * Modify RX IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7d77);
+
+ /*
+ * PA, PAD setting
+ */
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0xf80);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55, 0x4021f);
+
+ /*
+ * RX IQK setting
+ */
+ rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
+
+ /* path-A IQK setting */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c);
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
+
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x2816001f);
+ rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
+ rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a8d1);
+
+ /*
+ * Enter IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ if (priv->rf_paths > 1)
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
+ else
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
+
+ /*
+ * Disable BT
+ */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
+
+ /* One shot, path A LOK & IQK */
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
+ rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
+
+ mdelay(1);
+
+ /* Restore Ant Path */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
+#ifdef RTL8723BU_BT
+ /* GNT_BT = 1 */
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
+#endif
+
+ /*
+ * Leave IQK mode
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Check failed */
+ reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
+ reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x780);
+
+ val32 = (reg_eac >> 16) & 0x3ff;
+ if (val32 & 0x200)
+ val32 = 0x400 - val32;
+
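+	/*
+	 * The RX pass criteria mirror the TX check above, except that
+	 * BIT(27) of 0xeac flags the failure and the windows are
+	 * applied to the 0xea4/0xeac results.
+	 */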
+ if (!(reg_eac & BIT(27)) &&
+ ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
+ ((reg_eac & 0x03ff0000) != 0x00360000) &&
+ ((reg_ea4 & 0x03ff0000) < 0x01100000) &&
+ ((reg_ea4 & 0x03ff0000) > 0x00f00000) &&
+ val32 < 0xf)
+ result |= 0x02;
+ else /* If TX not OK, ignore RX */
+ goto out;
+out:
+ return result;
+}
+
+static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
+ int result[][8], int t)
+{
+ struct device *dev = &priv->udev->dev;
+ u32 i, val32;
+ int path_a_ok /*, path_b_ok */;
+ int retry = 2;
+ const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
+ REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
+ REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
+ REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
+ REG_TX_OFDM_BBON, REG_TX_TO_RX,
+ REG_TX_TO_TX, REG_RX_CCK,
+ REG_RX_OFDM, REG_RX_WAIT_RIFS,
+ REG_RX_TO_RX, REG_STANDBY,
+ REG_SLEEP, REG_PMPD_ANAEN
+ };
+ const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
+ REG_TXPAUSE, REG_BEACON_CTRL,
+ REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
+ };
+ const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
+ REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
+ REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
+ REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
+ REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
+ };
+ u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff;
+ u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff;
+
+ /*
+ * Note: IQ calibration must be performed after loading
+	 * PHY_REG.txt, radio_a.txt and radio_b.txt
+ */
+
+ if (t == 0) {
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+ rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+ rtl8xxxu_save_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+ }
+
+ rtl8xxxu_path_adda_on(priv, adda_regs, true);
+
+ /* MAC settings */
+ rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
+
+ val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
+ val32 |= 0x0f000000;
+ rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
+
+ rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
+ rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
+ rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
+
+ /*
+ * RX IQ calibration setting for 8723B D cut large current issue
+ * when leaving IPS
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
+ val32 |= 0x20;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x60fbd);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8723bu_iqk_path_a(priv);
+ if (path_a_ok == 0x01) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_BEFORE_IQK_A);
+ result[t][0] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_TX_POWER_AFTER_IQK_A);
+ result[t][1] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_a_ok = rtl8723bu_rx_iqk_path_a(priv);
+ if (path_a_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_A_2);
+ result[t][2] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_A_2);
+ result[t][3] = (val32 >> 16) & 0x3ff;
+
+ break;
+ }
+ }
+
+ if (!path_a_ok)
+ dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
+
+ if (priv->tx_paths > 1) {
+#if 1
+ dev_warn(dev, "%s: Path B not supported\n", __func__);
+#else
+
+ /*
+ * Path A into standby
+ */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000);
+
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ val32 |= 0x80800000;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ /* Turn Path B ADDA on */
+ rtl8xxxu_path_adda_on(priv, adda_regs, false);
+
+ for (i = 0; i < retry; i++) {
+ path_b_ok = rtl8xxxu_iqk_path_b(priv);
+ if (path_b_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
+ result[t][4] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
+ result[t][5] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
+
+ for (i = 0; i < retry; i++) {
+ path_b_ok = rtl8723bu_rx_iqk_path_b(priv);
+			if (path_b_ok == 0x03) {
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_BEFORE_IQK_B_2);
+ result[t][6] = (val32 >> 16) & 0x3ff;
+ val32 = rtl8xxxu_read32(priv,
+ REG_RX_POWER_AFTER_IQK_B_2);
+ result[t][7] = (val32 >> 16) & 0x3ff;
+ break;
+ }
+ }
+
+ if (!path_b_ok)
+ dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__);
+#endif
+ }
+
+ /* Back to BB mode, load original value */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 &= 0x000000ff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+
+ if (t) {
+ /* Reload ADDA power saving parameters */
+ rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
+ RTL8XXXU_ADDA_REGS);
+
+ /* Reload MAC parameters */
+ rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
+
+ /* Reload BB parameters */
+ rtl8xxxu_restore_regs(priv, iqk_bb_regs,
+ priv->bb_backup, RTL8XXXU_BB_REGS);
+
+ /* Restore RX initial gain */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
+ val32 &= 0xffffff00;
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50);
+ rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc);
+
+ if (priv->tx_paths > 1) {
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1);
+ val32 &= 0xffffff00;
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+ val32 | 0x50);
+ rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
+ val32 | xb_agc);
+ }
+
+ /* Load 0xe30 IQC default value */
+ rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
+ rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
+ }
+}
+
+static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+{
+ struct device *dev = &priv->udev->dev;
+ int result[4][8]; /* last is final result */
+ int i, candidate;
+ bool path_a_ok, path_b_ok;
+ u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
+ u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
+ u32 val32, bt_control;
+ s32 reg_tmp = 0;
+ bool simu;
+
+ rtl8xxxu_gen2_prepare_calibrate(priv, 1);
+
+ memset(result, 0, sizeof(result));
+ candidate = -1;
+
+ path_a_ok = false;
+ path_b_ok = false;
+
+ bt_control = rtl8xxxu_read32(priv, REG_BT_CONTROL_8723BU);
+
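+	/*
+	 * Run the calibration up to three times and use the first pair
+	 * of runs whose results agree; if no two runs match, the code
+	 * appears to fall back to the accumulated values in result[3]
+	 * when they are non-zero.
+	 */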
+ for (i = 0; i < 3; i++) {
+ rtl8723bu_phy_iqcalibrate(priv, result, i);
+
+ if (i == 1) {
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 0, 1);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 0, 2);
+ if (simu) {
+ candidate = 0;
+ break;
+ }
+
+ simu = rtl8xxxu_gen2_simularity_compare(priv,
+ result, 1, 2);
+ if (simu) {
+ candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ reg_tmp += result[3][i];
+
+ if (reg_tmp)
+ candidate = 3;
+ else
+ candidate = -1;
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ reg_e94 = result[i][0];
+ reg_e9c = result[i][1];
+ reg_ea4 = result[i][2];
+ reg_eac = result[i][3];
+ reg_eb4 = result[i][4];
+ reg_ebc = result[i][5];
+ reg_ec4 = result[i][6];
+ reg_ecc = result[i][7];
+ }
+
+ if (candidate >= 0) {
+ reg_e94 = result[candidate][0];
+ priv->rege94 = reg_e94;
+ reg_e9c = result[candidate][1];
+ priv->rege9c = reg_e9c;
+ reg_ea4 = result[candidate][2];
+ reg_eac = result[candidate][3];
+ reg_eb4 = result[candidate][4];
+ priv->regeb4 = reg_eb4;
+ reg_ebc = result[candidate][5];
+ priv->regebc = reg_ebc;
+ reg_ec4 = result[candidate][6];
+ reg_ecc = result[candidate][7];
+ dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
+ dev_dbg(dev,
+ "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
+ "ecc=%x\n ", __func__, reg_e94, reg_e9c,
+ reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
+ path_a_ok = true;
+ path_b_ok = true;
+ } else {
+ reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
+ reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
+ }
+
+ if (reg_e94 && candidate >= 0)
+ rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
+ candidate, (reg_ea4 == 0));
+
+ if (priv->tx_paths > 1 && reg_eb4)
+ rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
+ candidate, (reg_ec4 == 0));
+
+ rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
+ priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
+
+ rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control);
+
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
+ val32 |= 0x80000;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x18000);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xe6177);
+ val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
+ val32 |= 0x20;
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
+	rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x300bd);
+
+ if (priv->rf_paths > 1)
+ dev_dbg(dev, "%s: 8723BU 2T not supported\n", __func__);
+
+ rtl8xxxu_gen2_prepare_calibrate(priv, 0);
+}
+
+static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int count, ret = 0;
+
+ /* Turn off RF */
+ rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
+
+ /* Enable rising edge triggering interrupt */
+ val16 = rtl8xxxu_read16(priv, REG_GPIO_INTM);
+ val16 &= ~GPIO_INTM_EDGE_TRIG_IRQ;
+ rtl8xxxu_write16(priv, REG_GPIO_INTM, val16);
+
+	/* Release WLON reset 0x04[16] = 1 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_WLON_RESET;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+	/* 0x0005[1] = 1 turn off MAC by HW state machine */
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ if ((val8 & BIT(1)) == 0)
+ break;
+ udelay(10);
+ }
+
+ if (!count) {
+ dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
+ __func__);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* Enable BT control XTAL setting */
+ val8 = rtl8xxxu_read8(priv, REG_AFE_MISC);
+ val8 &= ~AFE_MISC_WL_XTAL_CTRL;
+ rtl8xxxu_write8(priv, REG_AFE_MISC, val8);
+
+	/* 0x0000[5] = 1: isolate analog Ips from digital */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 |= SYS_ISO_ANALOG_IPS;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+	/* 0x0020[0] = 0 disable LDOA12 MACRO block */
+ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+ val8 &= ~LDOA15_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+exit:
+ return ret;
+}
+
+static int rtl8723b_emu_to_active(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+ int count, ret = 0;
+
+	/* 0x20[0] = 1 enable LDOA12 MACRO block for all interfaces */
+ val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
+ val8 |= LDOA15_ENABLE;
+ rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
+
+	/* 0x67[4] = 0 to disable BT_GPS_SEL pins */
+ val8 = rtl8xxxu_read8(priv, 0x0067);
+ val8 &= ~BIT(4);
+ rtl8xxxu_write8(priv, 0x0067, val8);
+
+ mdelay(1);
+
+	/* 0x00[5] = 0: release analog Ips to digital; 1: isolation */
+ val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
+ val8 &= ~SYS_ISO_ANALOG_IPS;
+ rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
+
+	/* Disable SW LPS 0x04[10] = 0 */
+	val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 &= ~APS_FSMCO_SW_LPS;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* Wait until 0x04[17] = 1 power ready */
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if (val32 & BIT(17))
+ break;
+
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* We should be able to optimize the following three entries into one */
+
+	/* Release WLON reset 0x04[16] = 1 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_WLON_RESET;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+	/* Disable HWPDN 0x04[15] = 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 &= ~APS_FSMCO_HW_POWERDOWN;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+	/* Disable WL suspend */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE);
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ /* Set, then poll until 0 */
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ val32 |= APS_FSMCO_MAC_ENABLE;
+ rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
+
+ for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
+ val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
+ if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
+ ret = 0;
+ break;
+ }
+ udelay(10);
+ }
+
+ if (!count) {
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ /* Enable WL control XTAL setting */
+ val8 = rtl8xxxu_read8(priv, REG_AFE_MISC);
+ val8 |= AFE_MISC_WL_XTAL_CTRL;
+ rtl8xxxu_write8(priv, REG_AFE_MISC, val8);
+
+ /* Enable falling edge triggering interrupt */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_GPIO_INTM + 1, val8);
+
+ /* Enable GPIO9 interrupt mode */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2 + 1);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2 + 1, val8);
+
+ /* Enable GPIO9 input mode */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2);
+ val8 &= ~BIT(1);
+ rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2, val8);
+
+ /* Enable HSISR GPIO[C:0] interrupt */
+ val8 = rtl8xxxu_read8(priv, REG_HSIMR);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_HSIMR, val8);
+
+ /* Enable HSISR GPIO9 interrupt */
+ val8 = rtl8xxxu_read8(priv, REG_HSIMR + 2);
+ val8 |= BIT(1);
+ rtl8xxxu_write8(priv, REG_HSIMR + 2, val8);
+
+ val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL);
+ val8 |= MULTI_WIFI_HW_ROF_EN;
+ rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL, val8);
+
+ /* For GPIO9 internal pull high setting BIT(14) */
+ val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL + 1);
+ val8 |= BIT(6);
+ rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL + 1, val8);
+
+exit:
+ return ret;
+}
+
+static int rtl8723bu_power_on(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ int ret;
+
+ rtl8xxxu_disabled_to_emu(priv);
+
+ ret = rtl8723b_emu_to_active(priv);
+ if (ret)
+ goto exit;
+
+ /*
+ * Enable MAC DMA/WMAC/SCHEDULE/SEC block
+ * Set CR bit10 to enable 32k calibration.
+ */
+ val16 = rtl8xxxu_read16(priv, REG_CR);
+ val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
+ CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
+ CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
+ CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
+ CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
+ rtl8xxxu_write16(priv, REG_CR, val16);
+
+ /*
+ * BT coexist power on settings. This is identical for 1 and 2
+ * antenna parts.
+ */
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1 + 3, 0x20);
+
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ rtl8xxxu_write8(priv, REG_BT_CONTROL_8723BU + 1, 0x18);
+ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+ /* Antenna inverse */
+ rtl8xxxu_write8(priv, 0xfe08, 0x01);
+
+ val16 = rtl8xxxu_read16(priv, REG_PWR_DATA);
+ val16 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write16(priv, REG_PWR_DATA, val16);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 |= LEDCFG0_DPDT_SELECT;
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+ val8 &= ~PAD_CTRL1_SW_DPDT_SEL_DATA;
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+exit:
+ return ret;
+}
+
+static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u16 val16;
+
+ rtl8xxxu_flush_fifo(priv);
+
+ /*
+ * Disable TX report timer
+ */
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+
+ rtl8xxxu_write8(priv, REG_CR, 0x0000);
+
+ rtl8xxxu_active_to_lps(priv);
+
+ /* Reset Firmware if running in RAM */
+ if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
+ rtl8xxxu_firmware_self_reset(priv);
+
+ /* Reset MCU */
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 &= ~SYS_FUNC_CPU_ENABLE;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
+
+ /* Reset MCU ready status */
+ rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
+
+ rtl8723bu_active_to_emu(priv);
+
+ val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
+ val8 |= BIT(3); /* APS_FSMCO_HW_SUSPEND */
+ rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
+
+ /* 0x48[16] = 1 to enable GPIO9 as EXT wakeup */
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 2);
+ val8 |= BIT(0);
+ rtl8xxxu_write8(priv, REG_GPIO_INTM + 2, val8);
+}
+
+static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
+{
+ struct h2c_cmd h2c;
+ u32 val32;
+ u8 val8;
+
+ /*
+ * No indication anywhere as to what 0x0790 does. The 2 antenna
+ * vendor code preserves bits 6-7 here.
+ */
+ rtl8xxxu_write8(priv, 0x0790, 0x05);
+ /*
+ * 0x0778 seems to be related to enabling the number of antennas
+ * In the vendor driver halbtc8723b2ant_InitHwConfig() sets it
+ * to 0x03, while halbtc8723b1ant_InitHwConfig() sets it to 0x01
+ */
+ rtl8xxxu_write8(priv, 0x0778, 0x01);
+
+ val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
+ val8 |= BIT(5);
+ rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
+
+ rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780);
+
+ rtl8723bu_write_btreg(priv, 0x3c, 0x15); /* BT TRx Mask on */
+
+ /*
+ * Set BT grant to low
+ */
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_grant.cmd = H2C_8723B_BT_GRANT;
+ h2c.bt_grant.data = 0;
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_grant));
+
+ /*
+ * WLAN action by PTA
+ */
+ rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
+
+ /*
+ * BT select S0/S1 controlled by WiFi
+ */
+ val8 = rtl8xxxu_read8(priv, 0x0067);
+ val8 |= BIT(5);
+ rtl8xxxu_write8(priv, 0x0067, val8);
+
+ val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
+ val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
+ rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
+
+ /*
+ * Bits 6/7 are marked in/out ... but for what?
+ */
+ rtl8xxxu_write8(priv, 0x0974, 0xff);
+
+ val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
+ val32 |= (BIT(0) | BIT(1));
+ rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
+
+ rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77);
+
+ val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
+ val32 &= ~BIT(24);
+ val32 |= BIT(23);
+ rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
+
+ /*
+ * Fix external switch Main->S1, Aux->S0
+ */
+ val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
+ val8 &= ~BIT(0);
+ rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.ant_sel_rsv.cmd = H2C_8723B_ANT_SEL_RSV;
+ h2c.ant_sel_rsv.ant_inverse = 1;
+ h2c.ant_sel_rsv.int_switch_type = 0;
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv));
+
+ /*
+ * 0x280, 0x00, 0x200, 0x80 - not clear
+ */
+ rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+
+ /*
+ * Software control, antenna at WiFi side
+ */
+#ifdef NEED_PS_TDMA
+ rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00);
+#endif
+
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
+ rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
+ rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.bt_info.cmd = H2C_8723B_BT_INFO;
+ h2c.bt_info.data = BIT(0);
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_info));
+
+ memset(&h2c, 0, sizeof(struct h2c_cmd));
+ h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT;
+ h2c.ignore_wlan.data = 0;
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan));
+}
+
+static void rtl8723bu_init_aggregation(struct rtl8xxxu_priv *priv)
+{
+ u32 agg_rx;
+ u8 agg_ctrl;
+
+ /*
+ * For now simply disable RX aggregation
+ */
+ agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL);
+ agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN;
+
+ agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH);
+ agg_rx &= ~RXDMA_USB_AGG_ENABLE;
+ agg_rx &= ~0xff0f;
+
+ rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl);
+ rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx);
+}
+
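+/*
+ * Set up the NHM (noise histogram measurement) counters used for
+ * channel statistics; most thresholds are simply parked at 0xff.
+ */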
+static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv)
+{
+ u32 val32;
+
+ /* Time duration for NHM unit: 4us, 0x2710=40ms */
+ rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0x2710);
+ rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff);
+ rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff52);
+ rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff);
+ /* TH8 */
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
+ val32 |= 0xff;
+ rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
+ /* Enable CCK */
+ val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B);
+ val32 |= BIT(8) | BIT(9) | BIT(10);
+ rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32);
+ /* Max power amongst all RX antennas */
+ val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC);
+ val32 |= BIT(7);
+ rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
+}
+
+struct rtl8xxxu_fileops rtl8723bu_fops = {
+ .parse_efuse = rtl8723bu_parse_efuse,
+ .load_firmware = rtl8723bu_load_firmware,
+ .power_on = rtl8723bu_power_on,
+ .power_off = rtl8723bu_power_off,
+ .reset_8051 = rtl8723bu_reset_8051,
+ .llt_init = rtl8xxxu_auto_llt_table,
+ .init_phy_bb = rtl8723bu_init_phy_bb,
+ .init_phy_rf = rtl8723bu_init_phy_rf,
+ .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection,
+ .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
+ .config_channel = rtl8xxxu_gen2_config_channel,
+ .parse_rx_desc = rtl8xxxu_parse_rxdesc24,
+ .init_aggregation = rtl8723bu_init_aggregation,
+ .init_statistics = rtl8723bu_init_statistics,
+ .enable_rf = rtl8723b_enable_rf,
+ .disable_rf = rtl8xxxu_gen2_disable_rf,
+ .usb_quirks = rtl8xxxu_gen2_usb_quirks,
+ .set_tx_power = rtl8723b_set_tx_power,
+ .update_rate_mask = rtl8xxxu_gen2_update_rate_mask,
+ .report_connect = rtl8xxxu_gen2_report_connect,
+ .writeN_block_size = 1024,
+ .tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
+ .rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
+ .has_s0s1 = 1,
+ .adda_1t_init = 0x01c00014,
+ .adda_1t_path_on = 0x01c00014,
+ .adda_2t_path_on_a = 0x01c00014,
+ .adda_2t_path_on_b = 0x01c00014,
+ .trxff_boundary = 0x3f7f,
+ .pbp_rx = PBP_PAGE_SIZE_256,
+ .pbp_tx = PBP_PAGE_SIZE_256,
+ .mactable = rtl8723b_mac_init_table,
+};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index a275ff145..05a1ff26f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -1,7 +1,7 @@
/*
* RTL8XXXU mac80211 USB driver
*
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
*
* Portions, notably calibration code:
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
@@ -42,7 +42,7 @@
#define DRIVER_NAME "rtl8xxxu"
-static int rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
+int rtl8xxxu_debug = RTL8XXXU_DEBUG_EFUSE;
static bool rtl8xxxu_ht40_2g;
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
@@ -83,33 +83,33 @@ static struct ieee80211_rate rtl8xxxu_rates[] = {
};
static struct ieee80211_channel rtl8xxxu_channels_2g[] = {
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2412,
.hw_value = 1, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2417,
.hw_value = 2, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2422,
.hw_value = 3, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2427,
.hw_value = 4, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2432,
.hw_value = 5, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2437,
.hw_value = 6, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2442,
.hw_value = 7, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2447,
.hw_value = 8, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2452,
.hw_value = 9, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2457,
.hw_value = 10, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2462,
.hw_value = 11, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2467,
.hw_value = 12, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2472,
.hw_value = 13, .max_power = 30 },
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2484,
.hw_value = 14, .max_power = 30 }
};
@@ -120,7 +120,7 @@ static struct ieee80211_supported_band rtl8xxxu_supported_band = {
.n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
};
-static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
+struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = {
{0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
{0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
{0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
@@ -145,37 +145,6 @@ static struct rtl8xxxu_reg8val rtl8723a_mac_init_table[] = {
{0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
};
-static struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
- {0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0},
- {0x064, 0x00}, {0x067, 0x20}, {0x428, 0x0a}, {0x429, 0x10},
- {0x430, 0x00}, {0x431, 0x00},
- {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
- {0x436, 0x07}, {0x437, 0x08}, {0x43c, 0x04}, {0x43d, 0x05},
- {0x43e, 0x07}, {0x43f, 0x08}, {0x440, 0x5d}, {0x441, 0x01},
- {0x442, 0x00}, {0x444, 0x10}, {0x445, 0x00}, {0x446, 0x00},
- {0x447, 0x00}, {0x448, 0x00}, {0x449, 0xf0}, {0x44a, 0x0f},
- {0x44b, 0x3e}, {0x44c, 0x10}, {0x44d, 0x00}, {0x44e, 0x00},
- {0x44f, 0x00}, {0x450, 0x00}, {0x451, 0xf0}, {0x452, 0x0f},
- {0x453, 0x00}, {0x456, 0x5e}, {0x460, 0x66}, {0x461, 0x66},
- {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
- {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
- {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
- {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
- {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
- {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
- {0x516, 0x0a}, {0x525, 0x4f},
- {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55c, 0x50},
- {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
- {0x620, 0xff}, {0x621, 0xff}, {0x622, 0xff}, {0x623, 0xff},
- {0x624, 0xff}, {0x625, 0xff}, {0x626, 0xff}, {0x627, 0xff},
- {0x638, 0x50}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
- {0x63f, 0x0e}, {0x640, 0x40}, {0x642, 0x40}, {0x643, 0x00},
- {0x652, 0xc8}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
- {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
- {0x70a, 0x65}, {0x70b, 0x87}, {0x765, 0x18}, {0x76e, 0x04},
- {0xffff, 0xff},
-};
-
static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
{0x800, 0x80040000}, {0x804, 0x00000003},
{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
@@ -274,107 +243,6 @@ static struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
{0xffff, 0xffffffff},
};
-static struct rtl8xxxu_reg32val rtl8723b_phy_1t_init_table[] = {
- {0x800, 0x80040000}, {0x804, 0x00000003},
- {0x808, 0x0000fc00}, {0x80c, 0x0000000a},
- {0x810, 0x10001331}, {0x814, 0x020c3d10},
- {0x818, 0x02200385}, {0x81c, 0x00000000},
- {0x820, 0x01000100}, {0x824, 0x00190204},
- {0x828, 0x00000000}, {0x82c, 0x00000000},
- {0x830, 0x00000000}, {0x834, 0x00000000},
- {0x838, 0x00000000}, {0x83c, 0x00000000},
- {0x840, 0x00010000}, {0x844, 0x00000000},
- {0x848, 0x00000000}, {0x84c, 0x00000000},
- {0x850, 0x00000000}, {0x854, 0x00000000},
- {0x858, 0x569a11a9}, {0x85c, 0x01000014},
- {0x860, 0x66f60110}, {0x864, 0x061f0649},
- {0x868, 0x00000000}, {0x86c, 0x27272700},
- {0x870, 0x07000760}, {0x874, 0x25004000},
- {0x878, 0x00000808}, {0x87c, 0x00000000},
- {0x880, 0xb0000c1c}, {0x884, 0x00000001},
- {0x888, 0x00000000}, {0x88c, 0xccc000c0},
- {0x890, 0x00000800}, {0x894, 0xfffffffe},
- {0x898, 0x40302010}, {0x89c, 0x00706050},
- {0x900, 0x00000000}, {0x904, 0x00000023},
- {0x908, 0x00000000}, {0x90c, 0x81121111},
- {0x910, 0x00000002}, {0x914, 0x00000201},
- {0xa00, 0x00d047c8}, {0xa04, 0x80ff800c},
- {0xa08, 0x8c838300}, {0xa0c, 0x2e7f120f},
- {0xa10, 0x9500bb78}, {0xa14, 0x1114d028},
- {0xa18, 0x00881117}, {0xa1c, 0x89140f00},
- {0xa20, 0x1a1b0000}, {0xa24, 0x090e1317},
- {0xa28, 0x00000204}, {0xa2c, 0x00d30000},
- {0xa70, 0x101fbf00}, {0xa74, 0x00000007},
- {0xa78, 0x00000900}, {0xa7c, 0x225b0606},
- {0xa80, 0x21806490}, {0xb2c, 0x00000000},
- {0xc00, 0x48071d40}, {0xc04, 0x03a05611},
- {0xc08, 0x000000e4}, {0xc0c, 0x6c6c6c6c},
- {0xc10, 0x08800000}, {0xc14, 0x40000100},
- {0xc18, 0x08800000}, {0xc1c, 0x40000100},
- {0xc20, 0x00000000}, {0xc24, 0x00000000},
- {0xc28, 0x00000000}, {0xc2c, 0x00000000},
- {0xc30, 0x69e9ac44}, {0xc34, 0x469652af},
- {0xc38, 0x49795994}, {0xc3c, 0x0a97971c},
- {0xc40, 0x1f7c403f}, {0xc44, 0x000100b7},
- {0xc48, 0xec020107}, {0xc4c, 0x007f037f},
- {0xc50, 0x69553420}, {0xc54, 0x43bc0094},
- {0xc58, 0x00013149}, {0xc5c, 0x00250492},
- {0xc60, 0x00000000}, {0xc64, 0x7112848b},
- {0xc68, 0x47c00bff}, {0xc6c, 0x00000036},
- {0xc70, 0x2c7f000d}, {0xc74, 0x020610db},
- {0xc78, 0x0000001f}, {0xc7c, 0x00b91612},
- {0xc80, 0x390000e4}, {0xc84, 0x20f60000},
- {0xc88, 0x40000100}, {0xc8c, 0x20200000},
- {0xc90, 0x00020e1a}, {0xc94, 0x00000000},
- {0xc98, 0x00020e1a}, {0xc9c, 0x00007f7f},
- {0xca0, 0x00000000}, {0xca4, 0x000300a0},
- {0xca8, 0x00000000}, {0xcac, 0x00000000},
- {0xcb0, 0x00000000}, {0xcb4, 0x00000000},
- {0xcb8, 0x00000000}, {0xcbc, 0x28000000},
- {0xcc0, 0x00000000}, {0xcc4, 0x00000000},
- {0xcc8, 0x00000000}, {0xccc, 0x00000000},
- {0xcd0, 0x00000000}, {0xcd4, 0x00000000},
- {0xcd8, 0x64b22427}, {0xcdc, 0x00766932},
- {0xce0, 0x00222222}, {0xce4, 0x00000000},
- {0xce8, 0x37644302}, {0xcec, 0x2f97d40c},
- {0xd00, 0x00000740}, {0xd04, 0x40020401},
- {0xd08, 0x0000907f}, {0xd0c, 0x20010201},
- {0xd10, 0xa0633333}, {0xd14, 0x3333bc53},
- {0xd18, 0x7a8f5b6f}, {0xd2c, 0xcc979975},
- {0xd30, 0x00000000}, {0xd34, 0x80608000},
- {0xd38, 0x00000000}, {0xd3c, 0x00127353},
- {0xd40, 0x00000000}, {0xd44, 0x00000000},
- {0xd48, 0x00000000}, {0xd4c, 0x00000000},
- {0xd50, 0x6437140a}, {0xd54, 0x00000000},
- {0xd58, 0x00000282}, {0xd5c, 0x30032064},
- {0xd60, 0x4653de68}, {0xd64, 0x04518a3c},
- {0xd68, 0x00002101}, {0xd6c, 0x2a201c16},
- {0xd70, 0x1812362e}, {0xd74, 0x322c2220},
- {0xd78, 0x000e3c24}, {0xe00, 0x2d2d2d2d},
- {0xe04, 0x2d2d2d2d}, {0xe08, 0x0390272d},
- {0xe10, 0x2d2d2d2d}, {0xe14, 0x2d2d2d2d},
- {0xe18, 0x2d2d2d2d}, {0xe1c, 0x2d2d2d2d},
- {0xe28, 0x00000000}, {0xe30, 0x1000dc1f},
- {0xe34, 0x10008c1f}, {0xe38, 0x02140102},
- {0xe3c, 0x681604c2}, {0xe40, 0x01007c00},
- {0xe44, 0x01004800}, {0xe48, 0xfb000000},
- {0xe4c, 0x000028d1}, {0xe50, 0x1000dc1f},
- {0xe54, 0x10008c1f}, {0xe58, 0x02140102},
- {0xe5c, 0x28160d05}, {0xe60, 0x00000008},
- {0xe68, 0x001b2556}, {0xe6c, 0x00c00096},
- {0xe70, 0x00c00096}, {0xe74, 0x01000056},
- {0xe78, 0x01000014}, {0xe7c, 0x01000056},
- {0xe80, 0x01000014}, {0xe84, 0x00c00096},
- {0xe88, 0x01000056}, {0xe8c, 0x00c00096},
- {0xed0, 0x00c00096}, {0xed4, 0x00c00096},
- {0xed8, 0x00c00096}, {0xedc, 0x000000d6},
- {0xee0, 0x000000d6}, {0xeec, 0x01c00016},
- {0xf14, 0x00000003}, {0xf4c, 0x00000000},
- {0xf00, 0x00000300},
- {0x820, 0x01000100}, {0x800, 0x83040000},
- {0xffff, 0xffffffff},
-};
-
static struct rtl8xxxu_reg32val rtl8192cu_phy_2t_init_table[] = {
{0x024, 0x0011800f}, {0x028, 0x00ffdb83},
{0x800, 0x80040002}, {0x804, 0x00000003},
@@ -740,470 +608,6 @@ static struct rtl8xxxu_reg32val rtl8xxx_agc_highpa_table[] = {
{0xffff, 0xffffffff}
};
-static struct rtl8xxxu_reg32val rtl8xxx_agc_8723bu_table[] = {
- {0xc78, 0xfd000001}, {0xc78, 0xfc010001},
- {0xc78, 0xfb020001}, {0xc78, 0xfa030001},
- {0xc78, 0xf9040001}, {0xc78, 0xf8050001},
- {0xc78, 0xf7060001}, {0xc78, 0xf6070001},
- {0xc78, 0xf5080001}, {0xc78, 0xf4090001},
- {0xc78, 0xf30a0001}, {0xc78, 0xf20b0001},
- {0xc78, 0xf10c0001}, {0xc78, 0xf00d0001},
- {0xc78, 0xef0e0001}, {0xc78, 0xee0f0001},
- {0xc78, 0xed100001}, {0xc78, 0xec110001},
- {0xc78, 0xeb120001}, {0xc78, 0xea130001},
- {0xc78, 0xe9140001}, {0xc78, 0xe8150001},
- {0xc78, 0xe7160001}, {0xc78, 0xe6170001},
- {0xc78, 0xe5180001}, {0xc78, 0xe4190001},
- {0xc78, 0xe31a0001}, {0xc78, 0xa51b0001},
- {0xc78, 0xa41c0001}, {0xc78, 0xa31d0001},
- {0xc78, 0x671e0001}, {0xc78, 0x661f0001},
- {0xc78, 0x65200001}, {0xc78, 0x64210001},
- {0xc78, 0x63220001}, {0xc78, 0x4a230001},
- {0xc78, 0x49240001}, {0xc78, 0x48250001},
- {0xc78, 0x47260001}, {0xc78, 0x46270001},
- {0xc78, 0x45280001}, {0xc78, 0x44290001},
- {0xc78, 0x432a0001}, {0xc78, 0x422b0001},
- {0xc78, 0x292c0001}, {0xc78, 0x282d0001},
- {0xc78, 0x272e0001}, {0xc78, 0x262f0001},
- {0xc78, 0x0a300001}, {0xc78, 0x09310001},
- {0xc78, 0x08320001}, {0xc78, 0x07330001},
- {0xc78, 0x06340001}, {0xc78, 0x05350001},
- {0xc78, 0x04360001}, {0xc78, 0x03370001},
- {0xc78, 0x02380001}, {0xc78, 0x01390001},
- {0xc78, 0x013a0001}, {0xc78, 0x013b0001},
- {0xc78, 0x013c0001}, {0xc78, 0x013d0001},
- {0xc78, 0x013e0001}, {0xc78, 0x013f0001},
- {0xc78, 0xfc400001}, {0xc78, 0xfb410001},
- {0xc78, 0xfa420001}, {0xc78, 0xf9430001},
- {0xc78, 0xf8440001}, {0xc78, 0xf7450001},
- {0xc78, 0xf6460001}, {0xc78, 0xf5470001},
- {0xc78, 0xf4480001}, {0xc78, 0xf3490001},
- {0xc78, 0xf24a0001}, {0xc78, 0xf14b0001},
- {0xc78, 0xf04c0001}, {0xc78, 0xef4d0001},
- {0xc78, 0xee4e0001}, {0xc78, 0xed4f0001},
- {0xc78, 0xec500001}, {0xc78, 0xeb510001},
- {0xc78, 0xea520001}, {0xc78, 0xe9530001},
- {0xc78, 0xe8540001}, {0xc78, 0xe7550001},
- {0xc78, 0xe6560001}, {0xc78, 0xe5570001},
- {0xc78, 0xe4580001}, {0xc78, 0xe3590001},
- {0xc78, 0xa65a0001}, {0xc78, 0xa55b0001},
- {0xc78, 0xa45c0001}, {0xc78, 0xa35d0001},
- {0xc78, 0x675e0001}, {0xc78, 0x665f0001},
- {0xc78, 0x65600001}, {0xc78, 0x64610001},
- {0xc78, 0x63620001}, {0xc78, 0x62630001},
- {0xc78, 0x61640001}, {0xc78, 0x48650001},
- {0xc78, 0x47660001}, {0xc78, 0x46670001},
- {0xc78, 0x45680001}, {0xc78, 0x44690001},
- {0xc78, 0x436a0001}, {0xc78, 0x426b0001},
- {0xc78, 0x286c0001}, {0xc78, 0x276d0001},
- {0xc78, 0x266e0001}, {0xc78, 0x256f0001},
- {0xc78, 0x24700001}, {0xc78, 0x09710001},
- {0xc78, 0x08720001}, {0xc78, 0x07730001},
- {0xc78, 0x06740001}, {0xc78, 0x05750001},
- {0xc78, 0x04760001}, {0xc78, 0x03770001},
- {0xc78, 0x02780001}, {0xc78, 0x01790001},
- {0xc78, 0x017a0001}, {0xc78, 0x017b0001},
- {0xc78, 0x017c0001}, {0xc78, 0x017d0001},
- {0xc78, 0x017e0001}, {0xc78, 0x017f0001},
- {0xc50, 0x69553422},
- {0xc50, 0x69553420},
- {0x824, 0x00390204},
- {0xffff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
- {0x00, 0x00030159}, {0x01, 0x00031284},
- {0x02, 0x00098000}, {0x03, 0x00039c63},
- {0x04, 0x000210e7}, {0x09, 0x0002044f},
- {0x0a, 0x0001a3f1}, {0x0b, 0x00014787},
- {0x0c, 0x000896fe}, {0x0d, 0x0000e02c},
- {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
- {0x19, 0x00000000}, {0x1a, 0x00030355},
- {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
- {0x1d, 0x000a1250}, {0x1e, 0x0000024f},
- {0x1f, 0x00000000}, {0x20, 0x0000b614},
- {0x21, 0x0006c000}, {0x22, 0x00000000},
- {0x23, 0x00001558}, {0x24, 0x00000060},
- {0x25, 0x00000483}, {0x26, 0x0004f000},
- {0x27, 0x000ec7d9}, {0x28, 0x00057730},
- {0x29, 0x00004783}, {0x2a, 0x00000001},
- {0x2b, 0x00021334}, {0x2a, 0x00000000},
- {0x2b, 0x00000054}, {0x2a, 0x00000001},
- {0x2b, 0x00000808}, {0x2b, 0x00053333},
- {0x2c, 0x0000000c}, {0x2a, 0x00000002},
- {0x2b, 0x00000808}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000003},
- {0x2b, 0x00000808}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000004},
- {0x2b, 0x00000808}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000005},
- {0x2b, 0x00000808}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000006},
- {0x2b, 0x00000709}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000007},
- {0x2b, 0x00000709}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000008},
- {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000009},
- {0x2b, 0x0000060a}, {0x2b, 0x00053333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
- {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
- {0x2b, 0x0000060a}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
- {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
- {0x2b, 0x0000060a}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
- {0x2b, 0x0000050b}, {0x2b, 0x00066666},
- {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
- {0x10, 0x0004000f}, {0x11, 0x000e31fc},
- {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
- {0x10, 0x0002000f}, {0x11, 0x000203f9},
- {0x10, 0x0003000f}, {0x11, 0x000ff500},
- {0x10, 0x00000000}, {0x11, 0x00000000},
- {0x10, 0x0008000f}, {0x11, 0x0003f100},
- {0x10, 0x0009000f}, {0x11, 0x00023100},
- {0x12, 0x00032000}, {0x12, 0x00071000},
- {0x12, 0x000b0000}, {0x12, 0x000fc000},
- {0x13, 0x000287b3}, {0x13, 0x000244b7},
- {0x13, 0x000204ab}, {0x13, 0x0001c49f},
- {0x13, 0x00018493}, {0x13, 0x0001429b},
- {0x13, 0x00010299}, {0x13, 0x0000c29c},
- {0x13, 0x000081a0}, {0x13, 0x000040ac},
- {0x13, 0x00000020}, {0x14, 0x0001944c},
- {0x14, 0x00059444}, {0x14, 0x0009944c},
- {0x14, 0x000d9444}, {0x15, 0x0000f474},
- {0x15, 0x0004f477}, {0x15, 0x0008f455},
- {0x15, 0x000cf455}, {0x16, 0x00000339},
- {0x16, 0x00040339}, {0x16, 0x00080339},
- {0x16, 0x000c0366}, {0x00, 0x00010159},
- {0x18, 0x0000f401}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0x1f, 0x00000003},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0x1e, 0x00000247}, {0x1f, 0x00000000},
- {0x00, 0x00030159},
- {0xff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8723bu_radioa_1t_init_table[] = {
- {0x00, 0x00010000}, {0xb0, 0x000dffe0},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0xb1, 0x00000018},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0xb2, 0x00084c00},
- {0xb5, 0x0000d2cc}, {0xb6, 0x000925aa},
- {0xb7, 0x00000010}, {0xb8, 0x0000907f},
- {0x5c, 0x00000002}, {0x7c, 0x00000002},
- {0x7e, 0x00000005}, {0x8b, 0x0006fc00},
- {0xb0, 0x000ff9f0}, {0x1c, 0x000739d2},
- {0x1e, 0x00000000}, {0xdf, 0x00000780},
- {0x50, 0x00067435},
- /*
- * The 8723bu vendor driver indicates that bit 8 should be set in
- * 0x51 for package types TFBGA90, TFBGA80, and TFBGA79. However
- * they never actually check the package type - and just default
- * to not setting it.
- */
- {0x51, 0x0006b04e},
- {0x52, 0x000007d2}, {0x53, 0x00000000},
- {0x54, 0x00050400}, {0x55, 0x0004026e},
- {0xdd, 0x0000004c}, {0x70, 0x00067435},
- /*
- * 0x71 has same package type condition as for register 0x51
- */
- {0x71, 0x0006b04e},
- {0x72, 0x000007d2}, {0x73, 0x00000000},
- {0x74, 0x00050400}, {0x75, 0x0004026e},
- {0xef, 0x00000100}, {0x34, 0x0000add7},
- {0x35, 0x00005c00}, {0x34, 0x00009dd4},
- {0x35, 0x00005000}, {0x34, 0x00008dd1},
- {0x35, 0x00004400}, {0x34, 0x00007dce},
- {0x35, 0x00003800}, {0x34, 0x00006cd1},
- {0x35, 0x00004400}, {0x34, 0x00005cce},
- {0x35, 0x00003800}, {0x34, 0x000048ce},
- {0x35, 0x00004400}, {0x34, 0x000034ce},
- {0x35, 0x00003800}, {0x34, 0x00002451},
- {0x35, 0x00004400}, {0x34, 0x0000144e},
- {0x35, 0x00003800}, {0x34, 0x00000051},
- {0x35, 0x00004400}, {0xef, 0x00000000},
- {0xef, 0x00000100}, {0xed, 0x00000010},
- {0x44, 0x0000add7}, {0x44, 0x00009dd4},
- {0x44, 0x00008dd1}, {0x44, 0x00007dce},
- {0x44, 0x00006cc1}, {0x44, 0x00005cce},
- {0x44, 0x000044d1}, {0x44, 0x000034ce},
- {0x44, 0x00002451}, {0x44, 0x0000144e},
- {0x44, 0x00000051}, {0xef, 0x00000000},
- {0xed, 0x00000000}, {0x7f, 0x00020080},
- {0xef, 0x00002000}, {0x3b, 0x000380ef},
- {0x3b, 0x000302fe}, {0x3b, 0x00028ce6},
- {0x3b, 0x000200bc}, {0x3b, 0x000188a5},
- {0x3b, 0x00010fbc}, {0x3b, 0x00008f71},
- {0x3b, 0x00000900}, {0xef, 0x00000000},
- {0xed, 0x00000001}, {0x40, 0x000380ef},
- {0x40, 0x000302fe}, {0x40, 0x00028ce6},
- {0x40, 0x000200bc}, {0x40, 0x000188a5},
- {0x40, 0x00010fbc}, {0x40, 0x00008f71},
- {0x40, 0x00000900}, {0xed, 0x00000000},
- {0x82, 0x00080000}, {0x83, 0x00008000},
- {0x84, 0x00048d80}, {0x85, 0x00068000},
- {0xa2, 0x00080000}, {0xa3, 0x00008000},
- {0xa4, 0x00048d80}, {0xa5, 0x00068000},
- {0xed, 0x00000002}, {0xef, 0x00000002},
- {0x56, 0x00000032}, {0x76, 0x00000032},
- {0x01, 0x00000780},
- {0xff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
- {0x00, 0x00030159}, {0x01, 0x00031284},
- {0x02, 0x00098000}, {0x03, 0x00018c63},
- {0x04, 0x000210e7}, {0x09, 0x0002044f},
- {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
- {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
- {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
- {0x19, 0x00000000}, {0x1a, 0x00010255},
- {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
- {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
- {0x1f, 0x00080001}, {0x20, 0x0000b614},
- {0x21, 0x0006c000}, {0x22, 0x00000000},
- {0x23, 0x00001558}, {0x24, 0x00000060},
- {0x25, 0x00000483}, {0x26, 0x0004f000},
- {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
- {0x29, 0x00004783}, {0x2a, 0x00000001},
- {0x2b, 0x00021334}, {0x2a, 0x00000000},
- {0x2b, 0x00000054}, {0x2a, 0x00000001},
- {0x2b, 0x00000808}, {0x2b, 0x00053333},
- {0x2c, 0x0000000c}, {0x2a, 0x00000002},
- {0x2b, 0x00000808}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000003},
- {0x2b, 0x00000808}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000004},
- {0x2b, 0x00000808}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000005},
- {0x2b, 0x00000808}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000006},
- {0x2b, 0x00000709}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000007},
- {0x2b, 0x00000709}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000008},
- {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000009},
- {0x2b, 0x0000060a}, {0x2b, 0x00053333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
- {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
- {0x2b, 0x0000060a}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
- {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
- {0x2b, 0x0000060a}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
- {0x2b, 0x0000050b}, {0x2b, 0x00066666},
- {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
- {0x10, 0x0004000f}, {0x11, 0x000e31fc},
- {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
- {0x10, 0x0002000f}, {0x11, 0x000203f9},
- {0x10, 0x0003000f}, {0x11, 0x000ff500},
- {0x10, 0x00000000}, {0x11, 0x00000000},
- {0x10, 0x0008000f}, {0x11, 0x0003f100},
- {0x10, 0x0009000f}, {0x11, 0x00023100},
- {0x12, 0x00032000}, {0x12, 0x00071000},
- {0x12, 0x000b0000}, {0x12, 0x000fc000},
- {0x13, 0x000287b3}, {0x13, 0x000244b7},
- {0x13, 0x000204ab}, {0x13, 0x0001c49f},
- {0x13, 0x00018493}, {0x13, 0x0001429b},
- {0x13, 0x00010299}, {0x13, 0x0000c29c},
- {0x13, 0x000081a0}, {0x13, 0x000040ac},
- {0x13, 0x00000020}, {0x14, 0x0001944c},
- {0x14, 0x00059444}, {0x14, 0x0009944c},
- {0x14, 0x000d9444}, {0x15, 0x0000f424},
- {0x15, 0x0004f424}, {0x15, 0x0008f424},
- {0x15, 0x000cf424}, {0x16, 0x000e0330},
- {0x16, 0x000a0330}, {0x16, 0x00060330},
- {0x16, 0x00020330}, {0x00, 0x00010159},
- {0x18, 0x0000f401}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0x1f, 0x00080003},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0x1e, 0x00044457}, {0x1f, 0x00080000},
- {0x00, 0x00030159},
- {0xff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8192cu_radiob_2t_init_table[] = {
- {0x00, 0x00030159}, {0x01, 0x00031284},
- {0x02, 0x00098000}, {0x03, 0x00018c63},
- {0x04, 0x000210e7}, {0x09, 0x0002044f},
- {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
- {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
- {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
- {0x12, 0x00032000}, {0x12, 0x00071000},
- {0x12, 0x000b0000}, {0x12, 0x000fc000},
- {0x13, 0x000287af}, {0x13, 0x000244b7},
- {0x13, 0x000204ab}, {0x13, 0x0001c49f},
- {0x13, 0x00018493}, {0x13, 0x00014297},
- {0x13, 0x00010295}, {0x13, 0x0000c298},
- {0x13, 0x0000819c}, {0x13, 0x000040a8},
- {0x13, 0x0000001c}, {0x14, 0x0001944c},
- {0x14, 0x00059444}, {0x14, 0x0009944c},
- {0x14, 0x000d9444}, {0x15, 0x0000f424},
- {0x15, 0x0004f424}, {0x15, 0x0008f424},
- {0x15, 0x000cf424}, {0x16, 0x000e0330},
- {0x16, 0x000a0330}, {0x16, 0x00060330},
- {0x16, 0x00020330},
- {0xff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8192cu_radioa_1t_init_table[] = {
- {0x00, 0x00030159}, {0x01, 0x00031284},
- {0x02, 0x00098000}, {0x03, 0x00018c63},
- {0x04, 0x000210e7}, {0x09, 0x0002044f},
- {0x0a, 0x0001adb1}, {0x0b, 0x00054867},
- {0x0c, 0x0008992e}, {0x0d, 0x0000e52c},
- {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
- {0x19, 0x00000000}, {0x1a, 0x00010255},
- {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
- {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
- {0x1f, 0x00080001}, {0x20, 0x0000b614},
- {0x21, 0x0006c000}, {0x22, 0x00000000},
- {0x23, 0x00001558}, {0x24, 0x00000060},
- {0x25, 0x00000483}, {0x26, 0x0004f000},
- {0x27, 0x000ec7d9}, {0x28, 0x000577c0},
- {0x29, 0x00004783}, {0x2a, 0x00000001},
- {0x2b, 0x00021334}, {0x2a, 0x00000000},
- {0x2b, 0x00000054}, {0x2a, 0x00000001},
- {0x2b, 0x00000808}, {0x2b, 0x00053333},
- {0x2c, 0x0000000c}, {0x2a, 0x00000002},
- {0x2b, 0x00000808}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000003},
- {0x2b, 0x00000808}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000004},
- {0x2b, 0x00000808}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000005},
- {0x2b, 0x00000808}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000006},
- {0x2b, 0x00000709}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000007},
- {0x2b, 0x00000709}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000008},
- {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000009},
- {0x2b, 0x0000060a}, {0x2b, 0x00053333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
- {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
- {0x2b, 0x0000060a}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
- {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
- {0x2b, 0x0000060a}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
- {0x2b, 0x0000050b}, {0x2b, 0x00066666},
- {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
- {0x10, 0x0004000f}, {0x11, 0x000e31fc},
- {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
- {0x10, 0x0002000f}, {0x11, 0x000203f9},
- {0x10, 0x0003000f}, {0x11, 0x000ff500},
- {0x10, 0x00000000}, {0x11, 0x00000000},
- {0x10, 0x0008000f}, {0x11, 0x0003f100},
- {0x10, 0x0009000f}, {0x11, 0x00023100},
- {0x12, 0x00032000}, {0x12, 0x00071000},
- {0x12, 0x000b0000}, {0x12, 0x000fc000},
- {0x13, 0x000287b3}, {0x13, 0x000244b7},
- {0x13, 0x000204ab}, {0x13, 0x0001c49f},
- {0x13, 0x00018493}, {0x13, 0x0001429b},
- {0x13, 0x00010299}, {0x13, 0x0000c29c},
- {0x13, 0x000081a0}, {0x13, 0x000040ac},
- {0x13, 0x00000020}, {0x14, 0x0001944c},
- {0x14, 0x00059444}, {0x14, 0x0009944c},
- {0x14, 0x000d9444}, {0x15, 0x0000f405},
- {0x15, 0x0004f405}, {0x15, 0x0008f405},
- {0x15, 0x000cf405}, {0x16, 0x000e0330},
- {0x16, 0x000a0330}, {0x16, 0x00060330},
- {0x16, 0x00020330}, {0x00, 0x00010159},
- {0x18, 0x0000f401}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0x1f, 0x00080003},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0x1e, 0x00044457}, {0x1f, 0x00080000},
- {0x00, 0x00030159},
- {0xff, 0xffffffff}
-};
-
-static struct rtl8xxxu_rfregval rtl8188ru_radioa_1t_highpa_table[] = {
- {0x00, 0x00030159}, {0x01, 0x00031284},
- {0x02, 0x00098000}, {0x03, 0x00018c63},
- {0x04, 0x000210e7}, {0x09, 0x0002044f},
- {0x0a, 0x0001adb0}, {0x0b, 0x00054867},
- {0x0c, 0x0008992e}, {0x0d, 0x0000e529},
- {0x0e, 0x00039ce7}, {0x0f, 0x00000451},
- {0x19, 0x00000000}, {0x1a, 0x00000255},
- {0x1b, 0x00060a00}, {0x1c, 0x000fc378},
- {0x1d, 0x000a1250}, {0x1e, 0x0004445f},
- {0x1f, 0x00080001}, {0x20, 0x0000b614},
- {0x21, 0x0006c000}, {0x22, 0x0000083c},
- {0x23, 0x00001558}, {0x24, 0x00000060},
- {0x25, 0x00000483}, {0x26, 0x0004f000},
- {0x27, 0x000ec7d9}, {0x28, 0x000977c0},
- {0x29, 0x00004783}, {0x2a, 0x00000001},
- {0x2b, 0x00021334}, {0x2a, 0x00000000},
- {0x2b, 0x00000054}, {0x2a, 0x00000001},
- {0x2b, 0x00000808}, {0x2b, 0x00053333},
- {0x2c, 0x0000000c}, {0x2a, 0x00000002},
- {0x2b, 0x00000808}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000003},
- {0x2b, 0x00000808}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000004},
- {0x2b, 0x00000808}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000005},
- {0x2b, 0x00000808}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000006},
- {0x2b, 0x00000709}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000007},
- {0x2b, 0x00000709}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000008},
- {0x2b, 0x0000060a}, {0x2b, 0x0004b333},
- {0x2c, 0x0000000d}, {0x2a, 0x00000009},
- {0x2b, 0x0000060a}, {0x2b, 0x00053333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000a},
- {0x2b, 0x0000060a}, {0x2b, 0x0005b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000b},
- {0x2b, 0x0000060a}, {0x2b, 0x00063333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000c},
- {0x2b, 0x0000060a}, {0x2b, 0x0006b333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000d},
- {0x2b, 0x0000060a}, {0x2b, 0x00073333},
- {0x2c, 0x0000000d}, {0x2a, 0x0000000e},
- {0x2b, 0x0000050b}, {0x2b, 0x00066666},
- {0x2c, 0x0000001a}, {0x2a, 0x000e0000},
- {0x10, 0x0004000f}, {0x11, 0x000e31fc},
- {0x10, 0x0006000f}, {0x11, 0x000ff9f8},
- {0x10, 0x0002000f}, {0x11, 0x000203f9},
- {0x10, 0x0003000f}, {0x11, 0x000ff500},
- {0x10, 0x00000000}, {0x11, 0x00000000},
- {0x10, 0x0008000f}, {0x11, 0x0003f100},
- {0x10, 0x0009000f}, {0x11, 0x00023100},
- {0x12, 0x000d8000}, {0x12, 0x00090000},
- {0x12, 0x00051000}, {0x12, 0x00012000},
- {0x13, 0x00028fb4}, {0x13, 0x00024fa8},
- {0x13, 0x000207a4}, {0x13, 0x0001c3b0},
- {0x13, 0x000183a4}, {0x13, 0x00014398},
- {0x13, 0x000101a4}, {0x13, 0x0000c198},
- {0x13, 0x000080a4}, {0x13, 0x00004098},
- {0x13, 0x00000000}, {0x14, 0x0001944c},
- {0x14, 0x00059444}, {0x14, 0x0009944c},
- {0x14, 0x000d9444}, {0x15, 0x0000f405},
- {0x15, 0x0004f405}, {0x15, 0x0008f405},
- {0x15, 0x000cf405}, {0x16, 0x000e0330},
- {0x16, 0x000a0330}, {0x16, 0x00060330},
- {0x16, 0x00020330}, {0x00, 0x00010159},
- {0x18, 0x0000f401}, {0xfe, 0x00000000},
- {0xfe, 0x00000000}, {0x1f, 0x00080003},
- {0xfe, 0x00000000}, {0xfe, 0x00000000},
- {0x1e, 0x00044457}, {0x1f, 0x00080000},
- {0x00, 0x00030159},
- {0xff, 0xffffffff}
-};
-
static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
{ /* RF_A */
.hssiparm1 = REG_FPGA0_XA_HSSI_PARM1,
@@ -1223,7 +627,7 @@ static struct rtl8xxxu_rfregs rtl8xxxu_rfregs[] = {
},
};
-static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
+const u32 rtl8xxxu_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
REG_OFDM0_XA_RX_IQ_IMBALANCE,
REG_OFDM0_XB_RX_IQ_IMBALANCE,
REG_OFDM0_ENERGY_CCA_THRES,
@@ -1235,7 +639,7 @@ static const u32 rtl8723au_iqk_phy_iq_bb_reg[RTL8XXXU_BB_REGS] = {
REG_OFDM0_RX_IQ_EXT_ANTA
};
-static u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
+u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
{
struct usb_device *udev = priv->udev;
int len;
@@ -1255,7 +659,7 @@ static u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr)
return data;
}
-static u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr)
+u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr)
{
struct usb_device *udev = priv->udev;
int len;
@@ -1275,7 +679,7 @@ static u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr)
return data;
}
-static u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr)
+u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr)
{
struct usb_device *udev = priv->udev;
int len;
@@ -1295,7 +699,7 @@ static u32 rtl8xxxu_read32(struct rtl8xxxu_priv *priv, u16 addr)
return data;
}
-static int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val)
+int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val)
{
struct usb_device *udev = priv->udev;
int ret;
@@ -1315,7 +719,7 @@ static int rtl8xxxu_write8(struct rtl8xxxu_priv *priv, u16 addr, u8 val)
return ret;
}
-static int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val)
+int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val)
{
struct usb_device *udev = priv->udev;
int ret;
@@ -1334,7 +738,7 @@ static int rtl8xxxu_write16(struct rtl8xxxu_priv *priv, u16 addr, u16 val)
return ret;
}
-static int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val)
+int rtl8xxxu_write32(struct rtl8xxxu_priv *priv, u16 addr, u32 val)
{
struct usb_device *udev = priv->udev;
int ret;
@@ -1393,8 +797,8 @@ write_error:
return -EAGAIN;
}
-static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
- enum rtl8xxxu_rfpath path, u8 reg)
+u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
+ enum rtl8xxxu_rfpath path, u8 reg)
{
u32 hssia, val32, retval;
@@ -1438,11 +842,11 @@ static u32 rtl8xxxu_read_rfreg(struct rtl8xxxu_priv *priv,
* have write issues in high temperature conditions. We may have to
* retry writing them.
*/
-static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
- enum rtl8xxxu_rfpath path, u8 reg, u32 data)
+int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
+ enum rtl8xxxu_rfpath path, u8 reg, u32 data)
{
int ret, retval;
- u32 dataaddr;
+ u32 dataaddr, val32;
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_RFREG_WRITE)
dev_info(&priv->udev->dev, "%s(%02x) = 0x%06x\n",
@@ -1451,6 +855,12 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
data &= FPGA0_LSSI_PARM_DATA_MASK;
dataaddr = (reg << FPGA0_LSSI_PARM_ADDR_SHIFT) | data;
+ if (priv->rtl_chip == RTL8192E) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+ val32 &= ~0x20000;
+ rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+ }
+
/* Use XB for path B */
ret = rtl8xxxu_write32(priv, rtl8xxxu_rfregs[path].lssiparm, dataaddr);
if (ret != sizeof(dataaddr))
@@ -1460,11 +870,17 @@ static int rtl8xxxu_write_rfreg(struct rtl8xxxu_priv *priv,
udelay(1);
+ if (priv->rtl_chip == RTL8192E) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_POWER_SAVE);
+ val32 |= 0x20000;
+ rtl8xxxu_write32(priv, REG_FPGA0_POWER_SAVE, val32);
+ }
+
return retval;
}
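
A usage sketch for the now-exported writer (the RF_A constant is assumed from the rtl8xxxu_rfpath enum, and the register value is illustrative only):

static int example_rf_write(struct rtl8xxxu_priv *priv)
{
	/* program RF register 0x00 on path A; on RTL8192E the call
	 * internally clears and restores bit 17 of REG_FPGA0_POWER_SAVE
	 * around the LSSI write, as added above */
	return rtl8xxxu_write_rfreg(priv, RF_A, 0x00, 0x00030159);
}
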
-static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv,
- struct h2c_cmd *h2c, int len)
+int
+rtl8xxxu_gen1_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c, int len)
{
struct device *dev = &priv->udev->dev;
int mbox_nr, retry, retval = 0;
@@ -1475,8 +891,7 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv,
mbox_nr = priv->next_mbox;
mbox_reg = REG_HMBOX_0 + (mbox_nr * 4);
- mbox_ext_reg = priv->fops->mbox_ext_reg +
- (mbox_nr * priv->fops->mbox_ext_width);
+ mbox_ext_reg = REG_HMBOX_EXT_0 + (mbox_nr * 2);
/*
* MBOX ready?
@@ -1498,19 +913,10 @@ static int rtl8723a_h2c_cmd(struct rtl8xxxu_priv *priv,
* Need to swap as it's being swapped again by rtl8xxxu_write16/32()
*/
if (len > sizeof(u32)) {
- if (priv->fops->mbox_ext_width == 4) {
- rtl8xxxu_write32(priv, mbox_ext_reg,
- le32_to_cpu(h2c->raw_wide.ext));
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
- dev_info(dev, "H2C_EXT %08x\n",
- le32_to_cpu(h2c->raw_wide.ext));
- } else {
- rtl8xxxu_write16(priv, mbox_ext_reg,
- le16_to_cpu(h2c->raw.ext));
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
- dev_info(dev, "H2C_EXT %04x\n",
- le16_to_cpu(h2c->raw.ext));
- }
+ rtl8xxxu_write16(priv, mbox_ext_reg, le16_to_cpu(h2c->raw.ext));
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+ dev_info(dev, "H2C_EXT %04x\n",
+ le16_to_cpu(h2c->raw.ext));
}
rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data));
if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
@@ -1523,28 +929,58 @@ error:
return retval;
}
-static void rtl8723bu_write_btreg(struct rtl8xxxu_priv *priv, u8 reg, u8 data)
+int
+rtl8xxxu_gen2_h2c_cmd(struct rtl8xxxu_priv *priv, struct h2c_cmd *h2c, int len)
{
- struct h2c_cmd h2c;
- int reqnum = 0;
+ struct device *dev = &priv->udev->dev;
+ int mbox_nr, retry, retval = 0;
+ int mbox_reg, mbox_ext_reg;
+ u8 val8;
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER;
- h2c.bt_mp_oper.operreq = 0 | (reqnum << 4);
- h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE;
- h2c.bt_mp_oper.data = data;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
+ mutex_lock(&priv->h2c_mutex);
- reqnum++;
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.bt_mp_oper.cmd = H2C_8723B_BT_MP_OPER;
- h2c.bt_mp_oper.operreq = 0 | (reqnum << 4);
- h2c.bt_mp_oper.opcode = BT_MP_OP_WRITE_REG_VALUE;
- h2c.bt_mp_oper.addr = reg;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_mp_oper));
+ mbox_nr = priv->next_mbox;
+ mbox_reg = REG_HMBOX_0 + (mbox_nr * 4);
+ mbox_ext_reg = REG_HMBOX_EXT0_8723B + (mbox_nr * 4);
+
+ /*
+ * MBOX ready?
+ */
+ retry = 100;
+ do {
+ val8 = rtl8xxxu_read8(priv, REG_HMTFR);
+ if (!(val8 & BIT(mbox_nr)))
+ break;
+ } while (retry--);
+
+ if (!retry) {
+ dev_info(dev, "%s: Mailbox busy\n", __func__);
+ retval = -EBUSY;
+ goto error;
+ }
+
+ /*
+ * Need to swap as it's being swapped again by rtl8xxxu_write16/32()
+ */
+ if (len > sizeof(u32)) {
+ rtl8xxxu_write32(priv, mbox_ext_reg,
+ le32_to_cpu(h2c->raw_wide.ext));
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+ dev_info(dev, "H2C_EXT %08x\n",
+ le32_to_cpu(h2c->raw_wide.ext));
+ }
+ rtl8xxxu_write32(priv, mbox_reg, le32_to_cpu(h2c->raw.data));
+ if (rtl8xxxu_debug & RTL8XXXU_DEBUG_H2C)
+ dev_info(dev, "H2C %08x\n", le32_to_cpu(h2c->raw.data));
+
+ priv->next_mbox = (mbox_nr + 1) % H2C_MAX_MBOX;
+
+error:
+ mutex_unlock(&priv->h2c_mutex);
+ return retval;
}
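
Callers drive this exactly like the rtl8723bu BT setup earlier in the patch: zero the h2c_cmd union, fill one sub-command, and pass that sub-command's size so the len > sizeof(u32) test selects the extended-mailbox path when needed. A minimal sketch:

static void example_gen2_h2c(struct rtl8xxxu_priv *priv)
{
	struct h2c_cmd h2c;

	memset(&h2c, 0, sizeof(struct h2c_cmd));
	h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT;
	h2c.ignore_wlan.data = 0;
	rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan));
}
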
-static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen1_enable_rf(struct rtl8xxxu_priv *priv)
{
u8 val8;
u32 val32;
@@ -1566,7 +1002,7 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
val32 &= ~OFDM_RF_PATH_TX_MASK;
if (priv->tx_paths == 2)
val32 |= OFDM_RF_PATH_TX_A | OFDM_RF_PATH_TX_B;
- else if (priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c)
+ else if (priv->rtl_chip == RTL8192C || priv->rtl_chip == RTL8191C)
val32 |= OFDM_RF_PATH_TX_B;
else
val32 |= OFDM_RF_PATH_TX_A;
@@ -1588,13 +1024,11 @@ static void rtl8723a_enable_rf(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_TXPAUSE, 0x00);
}
-static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen1_disable_rf(struct rtl8xxxu_priv *priv)
{
u8 sps0;
u32 val32;
- rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
-
sps0 = rtl8xxxu_read8(priv, REG_SPS0_CTRL);
/* RF RX code for preamble power saving */
@@ -1629,8 +1063,7 @@ static void rtl8723a_disable_rf(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_SPS0_CTRL, sps0);
}
-
-static void rtl8723a_stop_tx_beacon(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_stop_tx_beacon(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -1654,7 +1087,7 @@ static void rtl8723a_stop_tx_beacon(struct rtl8xxxu_priv *priv)
*
* Note: We index from 0 in the code
*/
-static int rtl8723a_channel_to_group(int channel)
+static int rtl8xxxu_gen1_channel_to_group(int channel)
{
int group;
@@ -1668,7 +1101,10 @@ static int rtl8723a_channel_to_group(int channel)
return group;
}
-static int rtl8723b_channel_to_group(int channel)
+/*
+ * Valid for rtl8723bu and rtl8192eu
+ */
+int rtl8xxxu_gen2_channel_to_group(int channel)
{
int group;
@@ -1686,7 +1122,7 @@ static int rtl8723b_channel_to_group(int channel)
return group;
}
-static void rtl8723au_config_channel(struct ieee80211_hw *hw)
+void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
u32 val32, rsr;
@@ -1808,7 +1244,7 @@ static void rtl8723au_config_channel(struct ieee80211_hw *hw)
}
}
-static void rtl8723bu_config_channel(struct ieee80211_hw *hw)
+void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
u32 val32, rsr;
@@ -1938,22 +1374,34 @@ static void rtl8723bu_config_channel(struct ieee80211_hw *hw)
}
}
-static void
-rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
+void
+rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
{
+ struct rtl8xxxu_power_base *power_base = priv->power_base;
u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
u8 val8;
int group, i;
- group = rtl8723a_channel_to_group(channel);
+ group = rtl8xxxu_gen1_channel_to_group(channel);
- cck[0] = priv->cck_tx_power_index_A[group];
- cck[1] = priv->cck_tx_power_index_B[group];
+ cck[0] = priv->cck_tx_power_index_A[group] - 1;
+ cck[1] = priv->cck_tx_power_index_B[group] - 1;
+
+ if (priv->hi_pa) {
+ if (cck[0] > 0x20)
+ cck[0] = 0x20;
+ if (cck[1] > 0x20)
+ cck[1] = 0x20;
+ }
ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
+ if (ofdm[0])
+ ofdm[0] -= 1;
+ if (ofdm[1])
+ ofdm[1] -= 1;
ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a;
ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b;
@@ -2009,27 +1457,39 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
ofdmbase[0] << 16 | ofdmbase[0] << 24;
ofdm_b = ofdmbase[1] | ofdmbase[1] << 8 |
ofdmbase[1] << 16 | ofdmbase[1] << 24;
- rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm_a);
- rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06, ofdm_b);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm_a);
- rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24, ofdm_b);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06,
+ ofdm_a + power_base->reg_0e00);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE18_06,
+ ofdm_b + power_base->reg_0830);
+
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24,
+ ofdm_a + power_base->reg_0e04);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_RATE54_24,
+ ofdm_b + power_base->reg_0834);
mcs_a = mcsbase[0] | mcsbase[0] << 8 |
mcsbase[0] << 16 | mcsbase[0] << 24;
mcs_b = mcsbase[1] | mcsbase[1] << 8 |
mcsbase[1] << 16 | mcsbase[1] << 24;
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs_a);
- rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00, mcs_b);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00,
+ mcs_a + power_base->reg_0e10);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS03_MCS00,
+ mcs_b + power_base->reg_083c);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs_a);
- rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04, mcs_b);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04,
+ mcs_a + power_base->reg_0e14);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS07_MCS04,
+ mcs_b + power_base->reg_0848);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08, mcs_a);
- rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08, mcs_b);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS11_MCS08,
+ mcs_a + power_base->reg_0e18);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS11_MCS08,
+ mcs_b + power_base->reg_084c);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12, mcs_a);
+ rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12,
+ mcs_a + power_base->reg_0e1c);
for (i = 0; i < 3; i++) {
if (i != 2)
val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
@@ -2037,7 +1497,8 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
}
- rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12, mcs_b);
+ rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12,
+ mcs_b + power_base->reg_0868);
for (i = 0; i < 3; i++) {
if (i != 2)
val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
@@ -2047,45 +1508,6 @@ rtl8723a_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
}
}
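
The ofdm_a/ofdm_b and mcs_a/mcs_b computations above replicate a single per-rate power index into all four byte lanes of a 32-bit TX AGC register. A sketch of that packing (helper name ours, not from the patch):

static inline u32 tx_agc_pack(u8 idx)
{
	/* one power index per byte lane, e.g. rates 18..06 or 54..24 */
	return idx | (idx << 8) | (idx << 16) | ((u32)idx << 24);
}
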
-static void
-rtl8723b_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
-{
- u32 val32, ofdm, mcs;
- u8 cck, ofdmbase, mcsbase;
- int group, tx_idx;
-
- tx_idx = 0;
- group = rtl8723b_channel_to_group(channel);
-
- cck = priv->cck_tx_power_index_B[group];
- val32 = rtl8xxxu_read32(priv, REG_TX_AGC_A_CCK1_MCS32);
- val32 &= 0xffff00ff;
- val32 |= (cck << 8);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_CCK1_MCS32, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11);
- val32 &= 0xff;
- val32 |= ((cck << 8) | (cck << 16) | (cck << 24));
- rtl8xxxu_write32(priv, REG_TX_AGC_B_CCK11_A_CCK2_11, val32);
-
- ofdmbase = priv->ht40_1s_tx_power_index_B[group];
- ofdmbase += priv->ofdm_tx_power_diff[tx_idx].b;
- ofdm = ofdmbase | ofdmbase << 8 | ofdmbase << 16 | ofdmbase << 24;
-
- rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE18_06, ofdm);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_RATE54_24, ofdm);
-
- mcsbase = priv->ht40_1s_tx_power_index_B[group];
- if (ht40)
- mcsbase += priv->ht40_tx_power_diff[tx_idx++].b;
- else
- mcsbase += priv->ht20_tx_power_diff[tx_idx++].b;
- mcs = mcsbase | mcsbase << 8 | mcsbase << 16 | mcsbase << 24;
-
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS03_MCS00, mcs);
- rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS07_MCS04, mcs);
-}
-
static void rtl8xxxu_set_linktype(struct rtl8xxxu_priv *priv,
enum nl80211_iftype linktype)
{
@@ -2191,11 +1613,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
if (val32 & SYS_CFG_BT_FUNC) {
if (priv->chip_cut >= 3) {
sprintf(priv->chip_name, "8723BU");
- priv->rtlchip = 0x8723b;
+ priv->rtl_chip = RTL8723B;
} else {
sprintf(priv->chip_name, "8723AU");
priv->usb_interrupts = 1;
- priv->rtlchip = 0x8723a;
+ priv->rtl_chip = RTL8723A;
}
priv->rf_paths = 1;
@@ -2213,19 +1635,20 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
} else if (val32 & SYS_CFG_TYPE_ID) {
bonding = rtl8xxxu_read32(priv, REG_HPON_FSM);
bonding &= HPON_FSM_BONDING_MASK;
- if (priv->chip_cut >= 3) {
+ if (priv->fops->tx_desc_size ==
+ sizeof(struct rtl8xxxu_txdesc40)) {
if (bonding == HPON_FSM_BONDING_1T2R) {
sprintf(priv->chip_name, "8191EU");
priv->rf_paths = 2;
priv->rx_paths = 2;
priv->tx_paths = 1;
- priv->rtlchip = 0x8191e;
+ priv->rtl_chip = RTL8191E;
} else {
sprintf(priv->chip_name, "8192EU");
priv->rf_paths = 2;
priv->rx_paths = 2;
priv->tx_paths = 2;
- priv->rtlchip = 0x8192e;
+ priv->rtl_chip = RTL8192E;
}
} else if (bonding == HPON_FSM_BONDING_1T2R) {
sprintf(priv->chip_name, "8191CU");
@@ -2233,14 +1656,14 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
priv->rx_paths = 2;
priv->tx_paths = 1;
priv->usb_interrupts = 1;
- priv->rtlchip = 0x8191c;
+ priv->rtl_chip = RTL8191C;
} else {
sprintf(priv->chip_name, "8192CU");
priv->rf_paths = 2;
priv->rx_paths = 2;
priv->tx_paths = 2;
priv->usb_interrupts = 1;
- priv->rtlchip = 0x8192c;
+ priv->rtl_chip = RTL8192C;
}
priv->has_wifi = 1;
} else {
@@ -2248,15 +1671,15 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
priv->rf_paths = 1;
priv->rx_paths = 1;
priv->tx_paths = 1;
- priv->rtlchip = 0x8188c;
+ priv->rtl_chip = RTL8188C;
priv->usb_interrupts = 1;
priv->has_wifi = 1;
}
- switch (priv->rtlchip) {
- case 0x8188e:
- case 0x8192e:
- case 0x8723b:
+ switch (priv->rtl_chip) {
+ case RTL8188E:
+ case RTL8192E:
+ case RTL8723B:
switch (val32 & SYS_CFG_VENDOR_EXT_MASK) {
case SYS_CFG_VENDOR_ID_TSMC:
sprintf(priv->chip_vendor, "TSMC");
@@ -2326,241 +1749,6 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv)
return 0;
}
-static int rtl8723au_parse_efuse(struct rtl8xxxu_priv *priv)
-{
- struct rtl8723au_efuse *efuse = &priv->efuse_wifi.efuse8723;
-
- if (efuse->rtl_id != cpu_to_le16(0x8129))
- return -EINVAL;
-
- ether_addr_copy(priv->mac_addr, efuse->mac_addr);
-
- memcpy(priv->cck_tx_power_index_A,
- efuse->cck_tx_power_index_A,
- sizeof(efuse->cck_tx_power_index_A));
- memcpy(priv->cck_tx_power_index_B,
- efuse->cck_tx_power_index_B,
- sizeof(efuse->cck_tx_power_index_B));
-
- memcpy(priv->ht40_1s_tx_power_index_A,
- efuse->ht40_1s_tx_power_index_A,
- sizeof(efuse->ht40_1s_tx_power_index_A));
- memcpy(priv->ht40_1s_tx_power_index_B,
- efuse->ht40_1s_tx_power_index_B,
- sizeof(efuse->ht40_1s_tx_power_index_B));
-
- memcpy(priv->ht20_tx_power_index_diff,
- efuse->ht20_tx_power_index_diff,
- sizeof(efuse->ht20_tx_power_index_diff));
- memcpy(priv->ofdm_tx_power_index_diff,
- efuse->ofdm_tx_power_index_diff,
- sizeof(efuse->ofdm_tx_power_index_diff));
-
- memcpy(priv->ht40_max_power_offset,
- efuse->ht40_max_power_offset,
- sizeof(efuse->ht40_max_power_offset));
- memcpy(priv->ht20_max_power_offset,
- efuse->ht20_max_power_offset,
- sizeof(efuse->ht20_max_power_offset));
-
- if (priv->efuse_wifi.efuse8723.version >= 0x01) {
- priv->has_xtalk = 1;
- priv->xtalk = priv->efuse_wifi.efuse8723.xtal_k & 0x3f;
- }
- dev_info(&priv->udev->dev, "Vendor: %.7s\n",
- efuse->vendor_name);
- dev_info(&priv->udev->dev, "Product: %.41s\n",
- efuse->device_name);
- return 0;
-}
-
-static int rtl8723bu_parse_efuse(struct rtl8xxxu_priv *priv)
-{
- struct rtl8723bu_efuse *efuse = &priv->efuse_wifi.efuse8723bu;
- int i;
-
- if (efuse->rtl_id != cpu_to_le16(0x8129))
- return -EINVAL;
-
- ether_addr_copy(priv->mac_addr, efuse->mac_addr);
-
- memcpy(priv->cck_tx_power_index_A, efuse->tx_power_index_A.cck_base,
- sizeof(efuse->tx_power_index_A.cck_base));
- memcpy(priv->cck_tx_power_index_B, efuse->tx_power_index_B.cck_base,
- sizeof(efuse->tx_power_index_B.cck_base));
-
- memcpy(priv->ht40_1s_tx_power_index_A,
- efuse->tx_power_index_A.ht40_base,
- sizeof(efuse->tx_power_index_A.ht40_base));
- memcpy(priv->ht40_1s_tx_power_index_B,
- efuse->tx_power_index_B.ht40_base,
- sizeof(efuse->tx_power_index_B.ht40_base));
-
- priv->ofdm_tx_power_diff[0].a =
- efuse->tx_power_index_A.ht20_ofdm_1s_diff.a;
- priv->ofdm_tx_power_diff[0].b =
- efuse->tx_power_index_B.ht20_ofdm_1s_diff.a;
-
- priv->ht20_tx_power_diff[0].a =
- efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
- priv->ht20_tx_power_diff[0].b =
- efuse->tx_power_index_B.ht20_ofdm_1s_diff.b;
-
- priv->ht40_tx_power_diff[0].a = 0;
- priv->ht40_tx_power_diff[0].b = 0;
-
- for (i = 1; i < RTL8723B_TX_COUNT; i++) {
- priv->ofdm_tx_power_diff[i].a =
- efuse->tx_power_index_A.pwr_diff[i - 1].ofdm;
- priv->ofdm_tx_power_diff[i].b =
- efuse->tx_power_index_B.pwr_diff[i - 1].ofdm;
-
- priv->ht20_tx_power_diff[i].a =
- efuse->tx_power_index_A.pwr_diff[i - 1].ht20;
- priv->ht20_tx_power_diff[i].b =
- efuse->tx_power_index_B.pwr_diff[i - 1].ht20;
-
- priv->ht40_tx_power_diff[i].a =
- efuse->tx_power_index_A.pwr_diff[i - 1].ht40;
- priv->ht40_tx_power_diff[i].b =
- efuse->tx_power_index_B.pwr_diff[i - 1].ht40;
- }
-
- priv->has_xtalk = 1;
- priv->xtalk = priv->efuse_wifi.efuse8723bu.xtal_k & 0x3f;
-
- dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
- dev_info(&priv->udev->dev, "Product: %.41s\n", efuse->device_name);
-
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- int i;
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8723bu_efuse));
- for (i = 0; i < sizeof(struct rtl8723bu_efuse); i += 8) {
- dev_info(&priv->udev->dev, "%02x: "
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
- raw[i], raw[i + 1], raw[i + 2],
- raw[i + 3], raw[i + 4], raw[i + 5],
- raw[i + 6], raw[i + 7]);
- }
- }
-
- return 0;
-}
-
-#ifdef CONFIG_RTL8XXXU_UNTESTED
-
-static int rtl8192cu_parse_efuse(struct rtl8xxxu_priv *priv)
-{
- struct rtl8192cu_efuse *efuse = &priv->efuse_wifi.efuse8192;
- int i;
-
- if (efuse->rtl_id != cpu_to_le16(0x8129))
- return -EINVAL;
-
- ether_addr_copy(priv->mac_addr, efuse->mac_addr);
-
- memcpy(priv->cck_tx_power_index_A,
- efuse->cck_tx_power_index_A,
- sizeof(efuse->cck_tx_power_index_A));
- memcpy(priv->cck_tx_power_index_B,
- efuse->cck_tx_power_index_B,
- sizeof(efuse->cck_tx_power_index_B));
-
- memcpy(priv->ht40_1s_tx_power_index_A,
- efuse->ht40_1s_tx_power_index_A,
- sizeof(efuse->ht40_1s_tx_power_index_A));
- memcpy(priv->ht40_1s_tx_power_index_B,
- efuse->ht40_1s_tx_power_index_B,
- sizeof(efuse->ht40_1s_tx_power_index_B));
- memcpy(priv->ht40_2s_tx_power_index_diff,
- efuse->ht40_2s_tx_power_index_diff,
- sizeof(efuse->ht40_2s_tx_power_index_diff));
-
- memcpy(priv->ht20_tx_power_index_diff,
- efuse->ht20_tx_power_index_diff,
- sizeof(efuse->ht20_tx_power_index_diff));
- memcpy(priv->ofdm_tx_power_index_diff,
- efuse->ofdm_tx_power_index_diff,
- sizeof(efuse->ofdm_tx_power_index_diff));
-
- memcpy(priv->ht40_max_power_offset,
- efuse->ht40_max_power_offset,
- sizeof(efuse->ht40_max_power_offset));
- memcpy(priv->ht20_max_power_offset,
- efuse->ht20_max_power_offset,
- sizeof(efuse->ht20_max_power_offset));
-
- dev_info(&priv->udev->dev, "Vendor: %.7s\n",
- efuse->vendor_name);
- dev_info(&priv->udev->dev, "Product: %.20s\n",
- efuse->device_name);
-
- if (efuse->rf_regulatory & 0x20) {
- sprintf(priv->chip_name, "8188RU");
- priv->hi_pa = 1;
- }
-
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8192cu_efuse));
- for (i = 0; i < sizeof(struct rtl8192cu_efuse); i += 8) {
- dev_info(&priv->udev->dev, "%02x: "
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
- raw[i], raw[i + 1], raw[i + 2],
- raw[i + 3], raw[i + 4], raw[i + 5],
- raw[i + 6], raw[i + 7]);
- }
- }
- return 0;
-}
-
-#endif
-
-static int rtl8192eu_parse_efuse(struct rtl8xxxu_priv *priv)
-{
- struct rtl8192eu_efuse *efuse = &priv->efuse_wifi.efuse8192eu;
- int i;
-
- if (efuse->rtl_id != cpu_to_le16(0x8129))
- return -EINVAL;
-
- ether_addr_copy(priv->mac_addr, efuse->mac_addr);
-
- priv->has_xtalk = 1;
- priv->xtalk = priv->efuse_wifi.efuse8192eu.xtal_k & 0x3f;
-
- dev_info(&priv->udev->dev, "Vendor: %.7s\n", efuse->vendor_name);
- dev_info(&priv->udev->dev, "Product: %.11s\n", efuse->device_name);
- dev_info(&priv->udev->dev, "Serial: %.11s\n", efuse->serial);
-
- if (rtl8xxxu_debug & RTL8XXXU_DEBUG_EFUSE) {
- unsigned char *raw = priv->efuse_wifi.raw;
-
- dev_info(&priv->udev->dev,
- "%s: dumping efuse (0x%02zx bytes):\n",
- __func__, sizeof(struct rtl8192eu_efuse));
- for (i = 0; i < sizeof(struct rtl8192eu_efuse); i += 8) {
- dev_info(&priv->udev->dev, "%02x: "
- "%02x %02x %02x %02x %02x %02x %02x %02x\n", i,
- raw[i], raw[i + 1], raw[i + 2],
- raw[i + 3], raw[i + 4], raw[i + 5],
- raw[i + 6], raw[i + 7]);
- }
- }
- /*
- * Temporarily disable 8192eu support
- */
- return -EINVAL;
- return 0;
-}
-
static int
rtl8xxxu_read_efuse8(struct rtl8xxxu_priv *priv, u16 offset, u8 *data)
{
@@ -2708,36 +1896,11 @@ exit:
return ret;
}
-static void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u16 sys_func;
-
- val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
- val8 &= ~BIT(0);
- rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
-
- sys_func = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- sys_func &= ~SYS_FUNC_CPU_ENABLE;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
-
- val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
- val8 |= BIT(0);
- rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
-
- sys_func |= SYS_FUNC_CPU_ENABLE;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
-}
-
-static void rtl8723bu_reset_8051(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_reset_8051(struct rtl8xxxu_priv *priv)
{
u8 val8;
u16 sys_func;
- val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
- val8 &= ~BIT(1);
- rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
-
val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
val8 &= ~BIT(0);
rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
@@ -2746,10 +1909,6 @@ static void rtl8723bu_reset_8051(struct rtl8xxxu_priv *priv)
sys_func &= ~SYS_FUNC_CPU_ENABLE;
rtl8xxxu_write16(priv, REG_SYS_FUNC, sys_func);
- val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL);
- val8 &= ~BIT(1);
- rtl8xxxu_write8(priv, REG_RSV_CTRL, val8);
-
val8 = rtl8xxxu_read8(priv, REG_RSV_CTRL + 1);
val8 |= BIT(0);
rtl8xxxu_write8(priv, REG_RSV_CTRL + 1, val8);
@@ -2806,7 +1965,7 @@ static int rtl8xxxu_start_firmware(struct rtl8xxxu_priv *priv)
/*
* Init H2C command
*/
- if (priv->rtlchip == 0x8723b)
+ if (priv->rtl_chip == RTL8723B)
rtl8xxxu_write8(priv, REG_HMTFR, 0x0f);
exit:
return ret;
@@ -2893,7 +2052,7 @@ fw_abort:
return ret;
}
-static int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
+int rtl8xxxu_load_firmware(struct rtl8xxxu_priv *priv, char *fw_name)
{
struct device *dev = &priv->udev->dev;
const struct firmware *fw;
@@ -2942,78 +2101,7 @@ exit:
return ret;
}
-static int rtl8723au_load_firmware(struct rtl8xxxu_priv *priv)
-{
- char *fw_name;
- int ret;
-
- switch (priv->chip_cut) {
- case 0:
- fw_name = "/*(DEBLOBBED)*/";
- break;
- case 1:
- if (priv->enable_bluetooth)
- fw_name = "/*(DEBLOBBED)*/";
- else
- fw_name = "/*(DEBLOBBED)*/";
-
- break;
- default:
- return -EINVAL;
- }
-
- ret = rtl8xxxu_load_firmware(priv, fw_name);
- return ret;
-}
-
-static int rtl8723bu_load_firmware(struct rtl8xxxu_priv *priv)
-{
- char *fw_name;
- int ret;
-
- if (priv->enable_bluetooth)
- fw_name = "/*(DEBLOBBED)*/";
- else
- fw_name = "/*(DEBLOBBED)*/";
-
- ret = rtl8xxxu_load_firmware(priv, fw_name);
- return ret;
-}
-
-#ifdef CONFIG_RTL8XXXU_UNTESTED
-
-static int rtl8192cu_load_firmware(struct rtl8xxxu_priv *priv)
-{
- char *fw_name;
- int ret;
-
- if (!priv->vendor_umc)
- fw_name = "/*(DEBLOBBED)*/";
- else if (priv->chip_cut || priv->rtlchip == 0x8192c)
- fw_name = "/*(DEBLOBBED)*/";
- else
- fw_name = "/*(DEBLOBBED)*/";
-
- ret = rtl8xxxu_load_firmware(priv, fw_name);
-
- return ret;
-}
-
-#endif
-
-static int rtl8192eu_load_firmware(struct rtl8xxxu_priv *priv)
-{
- char *fw_name;
- int ret;
-
- fw_name = "/*(DEBLOBBED)*/";
-
- ret = rtl8xxxu_load_firmware(priv, fw_name);
-
- return ret;
-}
-
-static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
{
u16 val16;
int i = 100;
@@ -3040,47 +2128,10 @@ static void rtl8xxxu_firmware_self_reset(struct rtl8xxxu_priv *priv)
}
}
-static void rtl8723bu_phy_init_antenna_selection(struct rtl8xxxu_priv *priv)
-{
- u32 val32;
-
- val32 = rtl8xxxu_read32(priv, 0x64);
- val32 &= ~(BIT(20) | BIT(24));
- rtl8xxxu_write32(priv, 0x64, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
- val32 &= ~BIT(4);
- rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_GPIO_MUXCFG);
- val32 |= BIT(3);
- rtl8xxxu_write32(priv, REG_GPIO_MUXCFG, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
- val32 |= BIT(24);
- rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
- val32 &= ~BIT(23);
- rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
- val32 |= (BIT(0) | BIT(1));
- rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_RFE_CTRL_ANTA_SRC);
- val32 &= 0xffffff00;
- val32 |= 0x77;
- rtl8xxxu_write32(priv, REG_RFE_CTRL_ANTA_SRC, val32);
-
- val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
- val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
- rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
-}
-
static int
-rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
+rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv)
{
+ struct rtl8xxxu_reg8val *array = priv->fops->mactable;
int i, ret;
u16 reg;
u8 val;
@@ -3095,19 +2146,20 @@ rtl8xxxu_init_mac(struct rtl8xxxu_priv *priv, struct rtl8xxxu_reg8val *array)
ret = rtl8xxxu_write8(priv, reg, val);
if (ret != 1) {
dev_warn(&priv->udev->dev,
- "Failed to initialize MAC\n");
+ "Failed to initialize MAC "
+ "(reg: %04x, val %02x)\n", reg, val);
return -EAGAIN;
}
}
- if (priv->rtlchip != 0x8723b)
+ if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
rtl8xxxu_write8(priv, REG_MAX_AGGR_NUM, 0x0a);
return 0;
}
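/*
 * A minimal sketch of the table-driven pattern rtl8xxxu_init_mac()
 * uses above: the per-chip mactable is a flat array of {reg, val}
 * pairs walked until a sentinel entry. The sentinel value and the
 * helper name below are assumptions for illustration, not the
 * driver's exact definitions.
 */
static int init_from_table_sketch(struct rtl8xxxu_priv *priv,
				  struct rtl8xxxu_reg8val *array)
{
	int i;

	for (i = 0; array[i].reg != 0xffff; i++) {
		/* Abort on the first write the device rejects */
		if (rtl8xxxu_write8(priv, array[i].reg, array[i].val) != 1)
			return -EAGAIN;
	}

	return 0;
}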
-static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
- struct rtl8xxxu_reg32val *array)
+int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_reg32val *array)
{
int i, ret;
u16 reg;
@@ -3132,50 +2184,30 @@ static int rtl8xxxu_init_phy_regs(struct rtl8xxxu_priv *priv,
return 0;
}
-/*
- * Most of this is black magic retrieved from the old rtl8723au driver
- */
-static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen1_init_phy_bb(struct rtl8xxxu_priv *priv)
{
u8 val8, ldoa15, ldov12d, lpldo, ldohci12;
u16 val16;
u32 val32;
- /*
- * Todo: The vendor driver maintains a table of PHY register
- * addresses, which is initialized here. Do we need this?
- */
-
- if (priv->rtlchip == 0x8723b) {
- val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB |
- SYS_FUNC_DIO_RF;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
-
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
- } else {
- val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
- udelay(2);
- val8 |= AFE_PLL_320_ENABLE;
- rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
- udelay(2);
+ val8 = rtl8xxxu_read8(priv, REG_AFE_PLL_CTRL);
+ udelay(2);
+ val8 |= AFE_PLL_320_ENABLE;
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL, val8);
+ udelay(2);
- rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
- udelay(2);
+ rtl8xxxu_write8(priv, REG_AFE_PLL_CTRL + 1, 0xff);
+ udelay(2);
- val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
- }
+ val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
+ val16 |= SYS_FUNC_BB_GLB_RSTN | SYS_FUNC_BBRSTB;
+ rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
- if (priv->rtlchip != 0x8723b) {
- /* AFE_XTAL_RF_GATE (bit 14) if addressing as 32 bit register */
- val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
- val32 &= ~AFE_XTAL_RF_GATE;
- if (priv->has_bluetooth)
- val32 &= ~AFE_XTAL_BT_GATE;
- rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
- }
+ val32 = rtl8xxxu_read32(priv, REG_AFE_XTAL_CTRL);
+ val32 &= ~AFE_XTAL_RF_GATE;
+ if (priv->has_bluetooth)
+ val32 &= ~AFE_XTAL_BT_GATE;
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, val32);
/* 6. 0x1f[7:0] = 0x07 */
val8 = RF_ENABLE | RF_RSTB | RF_SDMRSTB;
@@ -3185,21 +2217,36 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_init_phy_regs(priv, rtl8188ru_phy_1t_highpa_table);
else if (priv->tx_paths == 2)
rtl8xxxu_init_phy_regs(priv, rtl8192cu_phy_2t_init_table);
- else if (priv->rtlchip == 0x8723b) {
- /*
- * Why?
- */
- rtl8xxxu_write8(priv, REG_SYS_FUNC, 0xe3);
- rtl8xxxu_write8(priv, REG_AFE_XTAL_CTRL + 1, 0x80);
- rtl8xxxu_init_phy_regs(priv, rtl8723b_phy_1t_init_table);
- } else
+ else
rtl8xxxu_init_phy_regs(priv, rtl8723a_phy_1t_init_table);
-
- if (priv->rtlchip == 0x8188c && priv->hi_pa &&
+ if (priv->rtl_chip == RTL8188R && priv->hi_pa &&
priv->vendor_umc && priv->chip_cut == 1)
rtl8xxxu_write8(priv, REG_OFDM0_AGC_PARM1 + 2, 0x50);
+ if (priv->hi_pa)
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
+ else
+ rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
+
+ ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
+ ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
+ ldohci12 = 0x57;
+ lpldo = 1;
+ val32 = (lpldo << 24) | (ldohci12 << 16) | (ldov12d << 8) | ldoa15;
+ rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
+}
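/*
 * Worked example of the REG_LDOA15_CTRL packing above, using
 * illustrative field values (the real LDOA15_ and LDOV12D_ bit
 * definitions live in the header): with lpldo = 0x01,
 * ldohci12 = 0x57, ldov12d = 0x36 and ldoa15 = 0x0f,
 *
 *	val32 = (0x01 << 24) | (0x57 << 16) | (0x36 << 8) | 0x0f
 *	      = 0x0157360f
 *
 * i.e. one 8-bit LDO field per byte, ldoa15 in the least
 * significant byte.
 */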
+
+/*
+ * Most of this is black magic retrieved from the old rtl8723au driver
+ */
+static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
+
+ priv->fops->init_phy_bb(priv);
+
if (priv->tx_paths == 1 && priv->rx_paths == 2) {
/*
* For 1T2R boards, patch the registers.
@@ -3217,8 +2264,10 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_FPGA1_TX_INFO, val32);
val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
- val32 &= 0xff000000;
- val32 |= 0x45000000;
+ val32 &= ~CCK0_AFE_RX_MASK;
+ val32 &= 0x00ffffff;
+ val32 |= 0x40000000;
+ val32 |= CCK0_AFE_RX_ANT_B;
rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
val32 = rtl8xxxu_read32(priv, REG_OFDM0_TRX_PATH_ENABLE);
@@ -3258,13 +2307,6 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_TX_TO_TX, val32);
}
- if (priv->rtlchip == 0x8723b)
- rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_8723bu_table);
- else if (priv->hi_pa)
- rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_highpa_table);
- else
- rtl8xxxu_init_phy_regs(priv, rtl8xxx_agc_standard_table);
-
if (priv->has_xtalk) {
val32 = rtl8xxxu_read32(priv, REG_MAC_PHY_CTRL);
@@ -3275,16 +2317,8 @@ static int rtl8xxxu_init_phy_bb(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_MAC_PHY_CTRL, val32);
}
- if (priv->rtlchip != 0x8723bu) {
- ldoa15 = LDOA15_ENABLE | LDOA15_OBUF;
- ldov12d = LDOV12D_ENABLE | BIT(2) | (2 << LDOV12D_VADJ_SHIFT);
- ldohci12 = 0x57;
- lpldo = 1;
- val32 = (lpldo << 24) | (ldohci12 << 16) |
- (ldov12d << 8) | ldoa15;
-
- rtl8xxxu_write32(priv, REG_LDOA15_CTRL, val32);
- }
+ if (priv->rtl_chip == RTL8192E)
+ rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x000f81fb);
return 0;
}
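/*
 * Sketch of the dispatch introduced above: chip-specific PHY setup
 * now hangs off the fileops table, so the generic path simply calls
 * through it. The struct and instance below are illustrative
 * fragments, not the driver's full rtl8xxxu_fileops definition.
 */
struct fileops_sketch {
	void (*init_phy_bb)(struct rtl8xxxu_priv *priv);
};

static const struct fileops_sketch rtl8723au_fops_sketch = {
	.init_phy_bb = rtl8xxxu_gen1_init_phy_bb,
};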
@@ -3337,9 +2371,9 @@ static int rtl8xxxu_init_rf_regs(struct rtl8xxxu_priv *priv,
return 0;
}
-static int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
- struct rtl8xxxu_rfregval *table,
- enum rtl8xxxu_rfpath path)
+int rtl8xxxu_init_phy_rf(struct rtl8xxxu_priv *priv,
+ struct rtl8xxxu_rfregval *table,
+ enum rtl8xxxu_rfpath path)
{
u32 val32;
u16 val16, rfsi_rfenv;
@@ -3423,7 +2457,7 @@ static int rtl8xxxu_llt_write(struct rtl8xxxu_priv *priv, u8 address, u8 data)
return ret;
}
-static int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+int rtl8xxxu_init_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
{
int ret;
int i;
@@ -3454,7 +2488,7 @@ exit:
return ret;
}
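/*
 * A rough sketch of what an LLT init such as the one above typically
 * does, inferred from the rtl8xxxu_llt_write() helper: pages below
 * last_tx_page are chained for TX buffers and the remaining pages
 * form a ring. The exact boundary values (0xff/0xfe) are assumptions
 * for illustration.
 */
static int llt_init_sketch(struct rtl8xxxu_priv *priv, u8 last_tx_page)
{
	int ret, i;

	for (i = 0; i < last_tx_page; i++) {
		ret = rtl8xxxu_llt_write(priv, i, i + 1);
		if (ret)
			return ret;
	}

	/* Terminate the TX chain */
	ret = rtl8xxxu_llt_write(priv, last_tx_page, 0xff);
	if (ret)
		return ret;

	/* Chain the leftover pages into a ring */
	for (i = last_tx_page + 1; i < 0xfe; i++) {
		ret = rtl8xxxu_llt_write(priv, i, i + 1);
		if (ret)
			return ret;
	}

	/* Close the ring back to the first leftover page */
	return rtl8xxxu_llt_write(priv, 0xfe, last_tx_page + 1);
}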
-static int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
+int rtl8xxxu_auto_llt_table(struct rtl8xxxu_priv *priv, u8 last_tx_page)
{
u32 val32;
int ret = 0;
@@ -3598,9 +2632,8 @@ static int rtl8xxxu_init_queue_priority(struct rtl8xxxu_priv *priv)
return ret;
}
-static void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv,
- bool iqk_ok, int result[][8],
- int candidate, bool tx_only)
+void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv, bool iqk_ok,
+ int result[][8], int candidate, bool tx_only)
{
u32 oldval, x, tx0_a, reg;
int y, tx0_c;
@@ -3676,9 +2709,8 @@ static void rtl8xxxu_fill_iqk_matrix_a(struct rtl8xxxu_priv *priv,
rtl8xxxu_write32(priv, REG_OFDM0_RX_IQ_EXT_ANTA, val32);
}
-static void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv,
- bool iqk_ok, int result[][8],
- int candidate, bool tx_only)
+void rtl8xxxu_fill_iqk_matrix_b(struct rtl8xxxu_priv *priv, bool iqk_ok,
+ int result[][8], int candidate, bool tx_only)
{
u32 oldval, x, tx1_a, reg;
int y, tx1_c;
@@ -3810,8 +2842,8 @@ static bool rtl8xxxu_simularity_compare(struct rtl8xxxu_priv *priv,
return false;
}
-static bool rtl8723bu_simularity_compare(struct rtl8xxxu_priv *priv,
- int result[][8], int c1, int c2)
+bool rtl8xxxu_gen2_simularity_compare(struct rtl8xxxu_priv *priv,
+ int result[][8], int c1, int c2)
{
u32 i, j, diff, simubitmap, bound = 0;
int candidate[2] = {-1, -1}; /* for path A and path B */
@@ -3895,7 +2927,7 @@ static bool rtl8723bu_simularity_compare(struct rtl8xxxu_priv *priv,
return false;
}
-static void
+void
rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup)
{
int i;
@@ -3906,8 +2938,8 @@ rtl8xxxu_save_mac_regs(struct rtl8xxxu_priv *priv, const u32 *reg, u32 *backup)
backup[i] = rtl8xxxu_read32(priv, reg[i]);
}
-static void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
- const u32 *reg, u32 *backup)
+void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
+ const u32 *reg, u32 *backup)
{
int i;
@@ -3917,8 +2949,8 @@ static void rtl8xxxu_restore_mac_regs(struct rtl8xxxu_priv *priv,
rtl8xxxu_write32(priv, reg[i], backup[i]);
}
-static void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
- u32 *backup, int count)
+void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+ u32 *backup, int count)
{
int i;
@@ -3926,8 +2958,8 @@ static void rtl8xxxu_save_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
backup[i] = rtl8xxxu_read32(priv, regs[i]);
}
-static void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
- u32 *backup, int count)
+void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
+ u32 *backup, int count)
{
int i;
@@ -3936,8 +2968,8 @@ static void rtl8xxxu_restore_regs(struct rtl8xxxu_priv *priv, const u32 *regs,
}
-static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
- bool path_a_on)
+void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
+ bool path_a_on)
{
u32 path_on;
int i;
@@ -3956,8 +2988,8 @@ static void rtl8xxxu_path_adda_on(struct rtl8xxxu_priv *priv, const u32 *regs,
rtl8xxxu_write32(priv, regs[i], path_on);
}
-static void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv,
- const u32 *regs, u32 *backup)
+void rtl8xxxu_mac_calibration(struct rtl8xxxu_priv *priv,
+ const u32 *regs, u32 *backup)
{
int i = 0;
@@ -4062,369 +3094,6 @@ out:
return result;
}
-static int rtl8723bu_iqk_path_a(struct rtl8xxxu_priv *priv)
-{
- u32 reg_eac, reg_e94, reg_e9c, path_sel, val32;
- int result = 0;
-
- path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
-
- /*
- * Leave IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /*
- * Enable path A PA in TX IQK mode
- */
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
- val32 |= 0x80000;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x20000);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0003f);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xc7f87);
-
- /*
- * Tx IQK setting
- */
- rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
- rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
-
- /* path-A IQK setting */
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
-
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x821403ea);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000);
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
-
- /* LO calibration setting */
- rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x00462911);
-
- /*
- * Enter IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- val32 |= 0x80800000;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /*
- * The vendor driver indicates the USB module is always using
- * S0S1 path 1 for the 8723bu. This may be different for 8192eu
- */
- if (priv->rf_paths > 1)
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
- else
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
-
- /*
- * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu.
- * No trace of this in the 8192eu or 8188eu vendor drivers.
- */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
-
- /* One shot, path A LOK & IQK */
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
-
- mdelay(1);
-
- /* Restore Ant Path */
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
-#ifdef RTL8723BU_BT
- /* GNT_BT = 1 */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
-#endif
-
- /*
- * Leave IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /* Check failed */
- reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
- reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
- reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
-
- val32 = (reg_e9c >> 16) & 0x3ff;
- if (val32 & 0x200)
- val32 = 0x400 - val32;
-
- if (!(reg_eac & BIT(28)) &&
- ((reg_e94 & 0x03ff0000) != 0x01420000) &&
- ((reg_e9c & 0x03ff0000) != 0x00420000) &&
- ((reg_e94 & 0x03ff0000) < 0x01100000) &&
- ((reg_e94 & 0x03ff0000) > 0x00f00000) &&
- val32 < 0xf)
- result |= 0x01;
- else /* If TX not OK, ignore RX */
- goto out;
-
-out:
- return result;
-}
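/*
 * The IQK path functions above and below encode their outcome as a
 * small bitmask: bit 0 set means the TX stage passed its register
 * sanity checks, bit 1 the RX stage. A caller can test both stages
 * as in this sketch (helper name illustrative):
 */
static bool iqk_path_ok_sketch(int result)
{
	return (result & 0x03) == 0x03;	/* TX and RX both converged */
}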
-
-static int rtl8723bu_rx_iqk_path_a(struct rtl8xxxu_priv *priv)
-{
- u32 reg_ea4, reg_eac, reg_e94, reg_e9c, path_sel, val32;
- int result = 0;
-
- path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
-
- /*
- * Leave IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /*
- * Enable path A PA in TX IQK mode
- */
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
- val32 |= 0x80000;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
-
- /*
- * Tx IQK setting
- */
- rtl8xxxu_write32(priv, REG_TX_IQK, 0x01007c00);
- rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
-
- /* path-A IQK setting */
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x18008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
-
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82160ff0);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x28110000);
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
-
- /* LO calibration setting */
- rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a911);
-
- /*
- * Enter IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- val32 |= 0x80800000;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /*
- * The vendor driver indicates the USB module is always using
- * S0S1 path 1 for the 8723bu. This may be different for 8192eu
- */
- if (priv->rf_paths > 1)
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
- else
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
-
- /*
- * Bit 12 seems to be BT_GRANT, and is only found in the 8723bu.
- * No trace of this in the 8192eu or 8188eu vendor drivers.
- */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
-
- /* One shot, path A LOK & IQK */
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
-
- mdelay(1);
-
- /* Restore Ant Path */
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
-#ifdef RTL8723BU_BT
- /* GNT_BT = 1 */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
-#endif
-
- /*
- * Leave IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /* Check failed */
- reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
- reg_e94 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_A);
- reg_e9c = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_A);
-
- val32 = (reg_e9c >> 16) & 0x3ff;
- if (val32 & 0x200)
- val32 = 0x400 - val32;
-
- if (!(reg_eac & BIT(28)) &&
- ((reg_e94 & 0x03ff0000) != 0x01420000) &&
- ((reg_e9c & 0x03ff0000) != 0x00420000) &&
- ((reg_e94 & 0x03ff0000) < 0x01100000) &&
- ((reg_e94 & 0x03ff0000) > 0x00f00000) &&
- val32 < 0xf)
- result |= 0x01;
- else /* If TX not OK, ignore RX */
- goto out;
-
- val32 = 0x80007c00 | (reg_e94 &0x3ff0000) |
- ((reg_e9c & 0x3ff0000) >> 16);
- rtl8xxxu_write32(priv, REG_TX_IQK, val32);
-
- /*
- * Modify RX IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
- val32 |= 0x80000;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7d77);
-
- /*
- * PA, PAD setting
- */
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0xf80);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_55, 0x4021f);
-
- /*
- * RX IQK setting
- */
- rtl8xxxu_write32(priv, REG_RX_IQK, 0x01004800);
-
- /* path-A IQK setting */
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x18008c1c);
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_B, 0x38008c1c);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_B, 0x38008c1c);
-
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_A, 0x82110000);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_A, 0x2816001f);
- rtl8xxxu_write32(priv, REG_TX_IQK_PI_B, 0x82110000);
- rtl8xxxu_write32(priv, REG_RX_IQK_PI_B, 0x28110000);
-
- /* LO calibration setting */
- rtl8xxxu_write32(priv, REG_IQK_AGC_RSP, 0x0046a8d1);
-
- /*
- * Enter IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- val32 |= 0x80800000;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- if (priv->rf_paths > 1)
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000000);
- else
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00000280);
-
- /*
- * Disable BT
- */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00000800);
-
- /* One shot, path A LOK & IQK */
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf9000000);
- rtl8xxxu_write32(priv, REG_IQK_AGC_PTS, 0xf8000000);
-
- mdelay(1);
-
- /* Restore Ant Path */
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, path_sel);
-#ifdef RTL8723BU_BT
- /* GNT_BT = 1 */
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, 0x00001800);
-#endif
-
- /*
- * Leave IQK mode
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /* Check failed */
- reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
- reg_ea4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_A_2);
-
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_DF, 0x780);
-
- val32 = (reg_eac >> 16) & 0x3ff;
- if (val32 & 0x200)
- val32 = 0x400 - val32;
-
- if (!(reg_eac & BIT(27)) &&
- ((reg_ea4 & 0x03ff0000) != 0x01320000) &&
- ((reg_eac & 0x03ff0000) != 0x00360000) &&
- ((reg_ea4 & 0x03ff0000) < 0x01100000) &&
- ((reg_ea4 & 0x03ff0000) > 0x00f00000) &&
- val32 < 0xf)
- result |= 0x02;
- else /* If TX not OK, ignore RX */
- goto out;
-out:
- return result;
-}
-
-#ifdef RTL8723BU_PATH_B
-static int rtl8723bu_iqk_path_b(struct rtl8xxxu_priv *priv)
-{
- u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc, path_sel;
- int result = 0;
-
- path_sel = rtl8xxxu_read32(priv, REG_S0S1_PATH_SWITCH);
-
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /* One shot, path B LOK & IQK */
- rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000002);
- rtl8xxxu_write32(priv, REG_IQK_AGC_CONT, 0x00000000);
-
- mdelay(1);
-
- /* Check failed */
- reg_eac = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_A_2);
- reg_eb4 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
- reg_ebc = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
- reg_ec4 = rtl8xxxu_read32(priv, REG_RX_POWER_BEFORE_IQK_B_2);
- reg_ecc = rtl8xxxu_read32(priv, REG_RX_POWER_AFTER_IQK_B_2);
-
- if (!(reg_eac & BIT(31)) &&
- ((reg_eb4 & 0x03ff0000) != 0x01420000) &&
- ((reg_ebc & 0x03ff0000) != 0x00420000))
- result |= 0x01;
- else
- goto out;
-
- if (!(reg_eac & BIT(30)) &&
- (((reg_ec4 & 0x03ff0000) >> 16) != 0x132) &&
- (((reg_ecc & 0x03ff0000) >> 16) != 0x36))
- result |= 0x02;
- else
- dev_warn(&priv->udev->dev, "%s: Path B RX IQK failed!\n",
- __func__);
-out:
- return result;
-}
-#endif
-
static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
int result[][8], int t)
{
@@ -4489,9 +3158,12 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
- val32 |= (FPGA0_RF_PAPE | (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
- rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+ if (!priv->no_pape) {
+ val32 = rtl8xxxu_read32(priv, REG_FPGA0_XAB_RF_SW_CTRL);
+ val32 |= (FPGA0_RF_PAPE |
+ (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
+ }
val32 = rtl8xxxu_read32(priv, REG_FPGA0_XA_RF_INT_OE);
val32 &= ~BIT(10);
@@ -4627,249 +3299,18 @@ static void rtl8xxxu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
}
}
-static void rtl8723bu_phy_iqcalibrate(struct rtl8xxxu_priv *priv,
- int result[][8], int t)
-{
- struct device *dev = &priv->udev->dev;
- u32 i, val32;
- int path_a_ok /*, path_b_ok */;
- int retry = 2;
- const u32 adda_regs[RTL8XXXU_ADDA_REGS] = {
- REG_FPGA0_XCD_SWITCH_CTRL, REG_BLUETOOTH,
- REG_RX_WAIT_CCA, REG_TX_CCK_RFON,
- REG_TX_CCK_BBON, REG_TX_OFDM_RFON,
- REG_TX_OFDM_BBON, REG_TX_TO_RX,
- REG_TX_TO_TX, REG_RX_CCK,
- REG_RX_OFDM, REG_RX_WAIT_RIFS,
- REG_RX_TO_RX, REG_STANDBY,
- REG_SLEEP, REG_PMPD_ANAEN
- };
- const u32 iqk_mac_regs[RTL8XXXU_MAC_REGS] = {
- REG_TXPAUSE, REG_BEACON_CTRL,
- REG_BEACON_CTRL_1, REG_GPIO_MUXCFG
- };
- const u32 iqk_bb_regs[RTL8XXXU_BB_REGS] = {
- REG_OFDM0_TRX_PATH_ENABLE, REG_OFDM0_TR_MUX_PAR,
- REG_FPGA0_XCD_RF_SW_CTRL, REG_CONFIG_ANT_A, REG_CONFIG_ANT_B,
- REG_FPGA0_XAB_RF_SW_CTRL, REG_FPGA0_XA_RF_INT_OE,
- REG_FPGA0_XB_RF_INT_OE, REG_FPGA0_RF_MODE
- };
- u8 xa_agc = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1) & 0xff;
- u8 xb_agc = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1) & 0xff;
-
- /*
- * Note: IQ calibration must be performed after loading
- * PHY_REG.txt , and radio_a, radio_b.txt
- */
-
- if (t == 0) {
- /* Save ADDA parameters, turn Path A ADDA on */
- rtl8xxxu_save_regs(priv, adda_regs, priv->adda_backup,
- RTL8XXXU_ADDA_REGS);
- rtl8xxxu_save_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
- rtl8xxxu_save_regs(priv, iqk_bb_regs,
- priv->bb_backup, RTL8XXXU_BB_REGS);
- }
-
- rtl8xxxu_path_adda_on(priv, adda_regs, true);
-
- /* MAC settings */
- rtl8xxxu_mac_calibration(priv, iqk_mac_regs, priv->mac_backup);
-
- val32 = rtl8xxxu_read32(priv, REG_CCK0_AFE_SETTING);
- val32 |= 0x0f000000;
- rtl8xxxu_write32(priv, REG_CCK0_AFE_SETTING, val32);
-
- rtl8xxxu_write32(priv, REG_OFDM0_TRX_PATH_ENABLE, 0x03a05600);
- rtl8xxxu_write32(priv, REG_OFDM0_TR_MUX_PAR, 0x000800e4);
- rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_SW_CTRL, 0x22204000);
-
-#ifdef RTL8723BU_PATH_B
- /* Set RF mode to standby Path B */
- if (priv->tx_paths > 1)
- rtl8xxxu_write_rfreg(priv, RF_B, RF6052_REG_AC, 0x10000);
-#endif
-
-#if 0
- /* Page B init */
- rtl8xxxu_write32(priv, REG_CONFIG_ANT_A, 0x0f600000);
-
- if (priv->tx_paths > 1)
- rtl8xxxu_write32(priv, REG_CONFIG_ANT_B, 0x0f600000);
-#endif
-
- /*
- * RX IQ calibration setting for 8723B D cut large current issue
- * when leaving IPS
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
- val32 |= 0x80000;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
-
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x30000);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xf7fb7);
-
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
- val32 |= 0x20;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
-
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_43, 0x60fbd);
-
- for (i = 0; i < retry; i++) {
- path_a_ok = rtl8723bu_iqk_path_a(priv);
- if (path_a_ok == 0x01) {
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
-#if 0 /* Only needed in restore case, we may need this when going to suspend */
- priv->RFCalibrateInfo.TxLOK[RF_A] =
- rtl8xxxu_read_rfreg(priv, RF_A,
- RF6052_REG_TXM_IDAC);
-#endif
-
- val32 = rtl8xxxu_read32(priv,
- REG_TX_POWER_BEFORE_IQK_A);
- result[t][0] = (val32 >> 16) & 0x3ff;
- val32 = rtl8xxxu_read32(priv,
- REG_TX_POWER_AFTER_IQK_A);
- result[t][1] = (val32 >> 16) & 0x3ff;
-
- break;
- }
- }
-
- if (!path_a_ok)
- dev_dbg(dev, "%s: Path A TX IQK failed!\n", __func__);
-
- for (i = 0; i < retry; i++) {
- path_a_ok = rtl8723bu_rx_iqk_path_a(priv);
- if (path_a_ok == 0x03) {
- val32 = rtl8xxxu_read32(priv,
- REG_RX_POWER_BEFORE_IQK_A_2);
- result[t][2] = (val32 >> 16) & 0x3ff;
- val32 = rtl8xxxu_read32(priv,
- REG_RX_POWER_AFTER_IQK_A_2);
- result[t][3] = (val32 >> 16) & 0x3ff;
-
- break;
- }
- }
-
- if (!path_a_ok)
- dev_dbg(dev, "%s: Path A RX IQK failed!\n", __func__);
-
- if (priv->tx_paths > 1) {
-#if 1
- dev_warn(dev, "%s: Path B not supported\n", __func__);
-#else
-
- /*
- * Path A into standby
- */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_AC, 0x10000);
-
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- val32 |= 0x80800000;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- /* Turn Path B ADDA on */
- rtl8xxxu_path_adda_on(priv, adda_regs, false);
-
- for (i = 0; i < retry; i++) {
- path_b_ok = rtl8xxxu_iqk_path_b(priv);
- if (path_b_ok == 0x03) {
- val32 = rtl8xxxu_read32(priv, REG_TX_POWER_BEFORE_IQK_B);
- result[t][4] = (val32 >> 16) & 0x3ff;
- val32 = rtl8xxxu_read32(priv, REG_TX_POWER_AFTER_IQK_B);
- result[t][5] = (val32 >> 16) & 0x3ff;
- break;
- }
- }
-
- if (!path_b_ok)
- dev_dbg(dev, "%s: Path B IQK failed!\n", __func__);
-
- for (i = 0; i < retry; i++) {
- path_b_ok = rtl8723bu_rx_iqk_path_b(priv);
- if (path_a_ok == 0x03) {
- val32 = rtl8xxxu_read32(priv,
- REG_RX_POWER_BEFORE_IQK_B_2);
- result[t][6] = (val32 >> 16) & 0x3ff;
- val32 = rtl8xxxu_read32(priv,
- REG_RX_POWER_AFTER_IQK_B_2);
- result[t][7] = (val32 >> 16) & 0x3ff;
- break;
- }
- }
-
- if (!path_b_ok)
- dev_dbg(dev, "%s: Path B RX IQK failed!\n", __func__);
-#endif
- }
-
- /* Back to BB mode, load original value */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 &= 0x000000ff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
-
- if (t) {
- /* Reload ADDA power saving parameters */
- rtl8xxxu_restore_regs(priv, adda_regs, priv->adda_backup,
- RTL8XXXU_ADDA_REGS);
-
- /* Reload MAC parameters */
- rtl8xxxu_restore_mac_regs(priv, iqk_mac_regs, priv->mac_backup);
-
- /* Reload BB parameters */
- rtl8xxxu_restore_regs(priv, iqk_bb_regs,
- priv->bb_backup, RTL8XXXU_BB_REGS);
-
- /* Restore RX initial gain */
- val32 = rtl8xxxu_read32(priv, REG_OFDM0_XA_AGC_CORE1);
- val32 &= 0xffffff00;
- rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | 0x50);
- rtl8xxxu_write32(priv, REG_OFDM0_XA_AGC_CORE1, val32 | xa_agc);
-
- if (priv->tx_paths > 1) {
- val32 = rtl8xxxu_read32(priv, REG_OFDM0_XB_AGC_CORE1);
- val32 &= 0xffffff00;
- rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
- val32 | 0x50);
- rtl8xxxu_write32(priv, REG_OFDM0_XB_AGC_CORE1,
- val32 | xb_agc);
- }
-
- /* Load 0xe30 IQC default value */
- rtl8xxxu_write32(priv, REG_TX_IQK_TONE_A, 0x01008c00);
- rtl8xxxu_write32(priv, REG_RX_IQK_TONE_A, 0x01008c00);
- }
-}
-
-static void rtl8xxxu_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start)
+void rtl8xxxu_gen2_prepare_calibrate(struct rtl8xxxu_priv *priv, u8 start)
{
struct h2c_cmd h2c;
- if (priv->fops->mbox_ext_width < 4)
- return;
-
memset(&h2c, 0, sizeof(struct h2c_cmd));
h2c.bt_wlan_calibration.cmd = H2C_8723B_BT_WLAN_CALIBRATION;
h2c.bt_wlan_calibration.data = start;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_wlan_calibration));
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.bt_wlan_calibration));
}
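/*
 * Usage sketch: gen2 chips bracket IQ calibration with the
 * H2C_8723B_BT_WLAN_CALIBRATION notification so the BT coexistence
 * firmware can back off while the radio is measured. Hedged
 * pseudo-flow, not a verbatim caller:
 */
static void gen2_calibrate_sketch(struct rtl8xxxu_priv *priv)
{
	rtl8xxxu_gen2_prepare_calibrate(priv, 1);	/* calibration start */
	/* ... run the chip's phy_iq_calibrate sequence here ... */
	rtl8xxxu_gen2_prepare_calibrate(priv, 0);	/* calibration done */
}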
-static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen1_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
int result[4][8]; /* last is final result */
@@ -4880,8 +3321,6 @@ static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
s32 reg_tmp = 0;
bool simu;
- rtl8xxxu_prepare_calibrate(priv, 1);
-
memset(result, 0, sizeof(result));
candidate = -1;
@@ -4967,137 +3406,8 @@ static void rtl8723au_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
candidate, (reg_ec4 == 0));
- rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
- priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
-
- rtl8xxxu_prepare_calibrate(priv, 0);
-}
-
-static void rtl8723bu_phy_iq_calibrate(struct rtl8xxxu_priv *priv)
-{
- struct device *dev = &priv->udev->dev;
- int result[4][8]; /* last is final result */
- int i, candidate;
- bool path_a_ok, path_b_ok;
- u32 reg_e94, reg_e9c, reg_ea4, reg_eac;
- u32 reg_eb4, reg_ebc, reg_ec4, reg_ecc;
- u32 val32, bt_control;
- s32 reg_tmp = 0;
- bool simu;
-
- rtl8xxxu_prepare_calibrate(priv, 1);
-
- memset(result, 0, sizeof(result));
- candidate = -1;
-
- path_a_ok = false;
- path_b_ok = false;
-
- bt_control = rtl8xxxu_read32(priv, REG_BT_CONTROL_8723BU);
-
- for (i = 0; i < 3; i++) {
- rtl8723bu_phy_iqcalibrate(priv, result, i);
-
- if (i == 1) {
- simu = rtl8723bu_simularity_compare(priv, result, 0, 1);
- if (simu) {
- candidate = 0;
- break;
- }
- }
-
- if (i == 2) {
- simu = rtl8723bu_simularity_compare(priv, result, 0, 2);
- if (simu) {
- candidate = 0;
- break;
- }
-
- simu = rtl8723bu_simularity_compare(priv, result, 1, 2);
- if (simu) {
- candidate = 1;
- } else {
- for (i = 0; i < 8; i++)
- reg_tmp += result[3][i];
-
- if (reg_tmp)
- candidate = 3;
- else
- candidate = -1;
- }
- }
- }
-
- for (i = 0; i < 4; i++) {
- reg_e94 = result[i][0];
- reg_e9c = result[i][1];
- reg_ea4 = result[i][2];
- reg_eac = result[i][3];
- reg_eb4 = result[i][4];
- reg_ebc = result[i][5];
- reg_ec4 = result[i][6];
- reg_ecc = result[i][7];
- }
-
- if (candidate >= 0) {
- reg_e94 = result[candidate][0];
- priv->rege94 = reg_e94;
- reg_e9c = result[candidate][1];
- priv->rege9c = reg_e9c;
- reg_ea4 = result[candidate][2];
- reg_eac = result[candidate][3];
- reg_eb4 = result[candidate][4];
- priv->regeb4 = reg_eb4;
- reg_ebc = result[candidate][5];
- priv->regebc = reg_ebc;
- reg_ec4 = result[candidate][6];
- reg_ecc = result[candidate][7];
- dev_dbg(dev, "%s: candidate is %x\n", __func__, candidate);
- dev_dbg(dev,
- "%s: e94 =%x e9c=%x ea4=%x eac=%x eb4=%x ebc=%x ec4=%x "
- "ecc=%x\n ", __func__, reg_e94, reg_e9c,
- reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc);
- path_a_ok = true;
- path_b_ok = true;
- } else {
- reg_e94 = reg_eb4 = priv->rege94 = priv->regeb4 = 0x100;
- reg_e9c = reg_ebc = priv->rege9c = priv->regebc = 0x0;
- }
-
- if (reg_e94 && candidate >= 0)
- rtl8xxxu_fill_iqk_matrix_a(priv, path_a_ok, result,
- candidate, (reg_ea4 == 0));
-
- if (priv->tx_paths > 1 && reg_eb4)
- rtl8xxxu_fill_iqk_matrix_b(priv, path_b_ok, result,
- candidate, (reg_ec4 == 0));
-
- rtl8xxxu_save_regs(priv, rtl8723au_iqk_phy_iq_bb_reg,
+ rtl8xxxu_save_regs(priv, rtl8xxxu_iqk_phy_iq_bb_reg,
priv->bb_recovery_backup, RTL8XXXU_BB_REGS);
-
- rtl8xxxu_write32(priv, REG_BT_CONTROL_8723BU, bt_control);
-
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_WE_LUT);
- val32 |= 0x80000;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_WE_LUT, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_RCK_OS, 0x18000);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G1, 0x0001f);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_TXPA_G2, 0xe6177);
- val32 = rtl8xxxu_read_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED);
- val32 |= 0x20;
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_UNKNOWN_ED, val32);
- rtl8xxxu_write_rfreg(priv, RF_A, 0x43, 0x300bd);
-
- if (priv->rf_paths > 1) {
- dev_dbg(dev, "%s: beware 2T not yet supported\n", __func__);
-#ifdef RTL8723BU_PATH_B
- if (RF_Path == 0x0) //S1
- ODM_SetIQCbyRFpath(pDM_Odm, 0);
- else //S0
- ODM_SetIQCbyRFpath(pDM_Odm, 1);
-#endif
- }
- rtl8xxxu_prepare_calibrate(priv, 0);
}
static void rtl8723a_phy_lc_calibrate(struct rtl8xxxu_priv *priv)
@@ -5223,7 +3533,7 @@ static void rtl8xxxu_set_ampdu_min_space(struct rtl8xxxu_priv *priv, u8 density)
static int rtl8xxxu_active_to_emu(struct rtl8xxxu_priv *priv)
{
u8 val8;
- int count, ret;
+ int count, ret = 0;
/* Start of rtl8723AU_card_enable_flow */
/* Act to Cardemu sequence */
@@ -5268,69 +3578,11 @@ exit:
return ret;
}
-static int rtl8723bu_active_to_emu(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u16 val16;
- u32 val32;
- int count, ret;
-
- /* Turn off RF */
- rtl8xxxu_write8(priv, REG_RF_CTRL, 0);
-
- /* Enable rising edge triggering interrupt */
- val16 = rtl8xxxu_read16(priv, REG_GPIO_INTM);
- val16 &= ~GPIO_INTM_EDGE_TRIG_IRQ;
- rtl8xxxu_write16(priv, REG_GPIO_INTM, val16);
-
- /* Release WLON reset 0x04[16]= 1*/
- val32 = rtl8xxxu_read32(priv, REG_GPIO_INTM);
- val32 |= APS_FSMCO_WLON_RESET;
- rtl8xxxu_write32(priv, REG_GPIO_INTM, val32);
-
- /* 0x0005[1] = 1 turn off MAC by HW state machine*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 |= BIT(1);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- if ((val8 & BIT(1)) == 0)
- break;
- udelay(10);
- }
-
- if (!count) {
- dev_warn(&priv->udev->dev, "%s: Disabling MAC timed out\n",
- __func__);
- ret = -EBUSY;
- goto exit;
- }
-
- /* Enable BT control XTAL setting */
- val8 = rtl8xxxu_read8(priv, REG_AFE_MISC);
- val8 &= ~AFE_MISC_WL_XTAL_CTRL;
- rtl8xxxu_write8(priv, REG_AFE_MISC, val8);
-
- /* 0x0000[5] = 1 analog Ips to digital, 1:isolation */
- val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
- val8 |= SYS_ISO_ANALOG_IPS;
- rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
-
- /* 0x0020[0] = 0 disable LDOA12 MACRO block*/
- val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
- val8 &= ~LDOA15_ENABLE;
- rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
-
-exit:
- return ret;
-}
-
-static int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv)
+int rtl8xxxu_active_to_lps(struct rtl8xxxu_priv *priv)
{
u8 val8;
u32 val32;
- int count, ret;
+ int count, ret = 0;
rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
@@ -5382,7 +3634,7 @@ exit:
return ret;
}
-static void rtl8723a_disabled_to_emu(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_disabled_to_emu(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -5402,294 +3654,6 @@ static void rtl8723a_disabled_to_emu(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
}
-static void rtl8192e_disabled_to_emu(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
-
- /* Clear suspend enable and power down enable*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~(BIT(3) | BIT(4));
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-}
-
-static int rtl8192e_emu_to_active(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u32 val32;
- int count, ret = 0;
-
- /* disable HWPDN 0x04[15]=0*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~BIT(7);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* disable SW LPS 0x04[10]= 0 */
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~BIT(2);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* disable WL suspend*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~(BIT(3) | BIT(4));
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* wait till 0x04[17] = 1 power ready*/
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if (val32 & BIT(17))
- break;
-
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* We should be able to optimize the following three entries into one */
-
- /* release WLON reset 0x04[16]= 1*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
- val8 |= BIT(0);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
-
- /* set, then poll until 0 */
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 |= APS_FSMCO_MAC_ENABLE;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
- ret = 0;
- break;
- }
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
-exit:
- return ret;
-}
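/*
 * The power-state transitions above and below share one idiom: write
 * a self-clearing request bit, then poll it with a bounded udelay()
 * loop. A generic sketch of that pattern (helper name illustrative):
 */
static int set_and_poll_bit_sketch(struct rtl8xxxu_priv *priv,
				   u16 reg, u32 bit)
{
	u32 val32;
	int count;

	val32 = rtl8xxxu_read32(priv, reg);
	val32 |= bit;
	rtl8xxxu_write32(priv, reg, val32);

	for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
		val32 = rtl8xxxu_read32(priv, reg);
		if (!(val32 & bit))
			return 0;	/* hardware acked the request */
		udelay(10);
	}

	return -EBUSY;	/* state machine never completed */
}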
-
-static int rtl8723a_emu_to_active(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u32 val32;
- int count, ret = 0;
-
- /* 0x20[0] = 1 enable LDOA12 MACRO block for all interface*/
- val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
- val8 |= LDOA15_ENABLE;
- rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
-
- /* 0x67[0] = 0 to disable BT_GPS_SEL pins*/
- val8 = rtl8xxxu_read8(priv, 0x0067);
- val8 &= ~BIT(4);
- rtl8xxxu_write8(priv, 0x0067, val8);
-
- mdelay(1);
-
- /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */
- val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
- val8 &= ~SYS_ISO_ANALOG_IPS;
- rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
-
- /* disable SW LPS 0x04[10]= 0 */
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~BIT(2);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* wait till 0x04[17] = 1 power ready*/
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if (val32 & BIT(17))
- break;
-
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* We should be able to optimize the following three entries into one */
-
- /* release WLON reset 0x04[16]= 1*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
- val8 |= BIT(0);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
-
- /* disable HWPDN 0x04[15]= 0*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~BIT(7);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* disable WL suspend*/
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 1);
- val8 &= ~(BIT(3) | BIT(4));
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 1, val8);
-
- /* set, then poll until 0 */
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 |= APS_FSMCO_MAC_ENABLE;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
- ret = 0;
- break;
- }
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* 0x4C[23] = 0x4E[7] = 1, switch DPDT_SEL_P output from WL BB */
- /*
- * Note: Vendor driver actually clears this bit, despite the
- * documentation claims it's being set!
- */
- val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
- val8 |= LEDCFG2_DPDT_SELECT;
- val8 &= ~LEDCFG2_DPDT_SELECT;
- rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
-
-exit:
- return ret;
-}
-
-static int rtl8723b_emu_to_active(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u32 val32;
- int count, ret = 0;
-
- /* 0x20[0] = 1 enable LDOA12 MACRO block for all interface */
- val8 = rtl8xxxu_read8(priv, REG_LDOA15_CTRL);
- val8 |= LDOA15_ENABLE;
- rtl8xxxu_write8(priv, REG_LDOA15_CTRL, val8);
-
- /* 0x67[0] = 0 to disable BT_GPS_SEL pins*/
- val8 = rtl8xxxu_read8(priv, 0x0067);
- val8 &= ~BIT(4);
- rtl8xxxu_write8(priv, 0x0067, val8);
-
- mdelay(1);
-
- /* 0x00[5] = 0 release analog Ips to digital, 1:isolation */
- val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
- val8 &= ~SYS_ISO_ANALOG_IPS;
- rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
-
- /* Disable SW LPS 0x04[10]= 0 */
- val32 = rtl8xxxu_read8(priv, REG_APS_FSMCO);
- val32 &= ~APS_FSMCO_SW_LPS;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- /* Wait until 0x04[17] = 1 power ready */
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if (val32 & BIT(17))
- break;
-
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* We should be able to optimize the following three entries into one */
-
- /* Release WLON reset 0x04[16]= 1*/
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 |= APS_FSMCO_WLON_RESET;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- /* Disable HWPDN 0x04[15]= 0*/
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 &= ~APS_FSMCO_HW_POWERDOWN;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- /* Disable WL suspend*/
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 &= ~(APS_FSMCO_HW_SUSPEND | APS_FSMCO_PCIE);
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- /* Set, then poll until 0 */
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- val32 |= APS_FSMCO_MAC_ENABLE;
- rtl8xxxu_write32(priv, REG_APS_FSMCO, val32);
-
- for (count = RTL8XXXU_MAX_REG_POLL; count; count--) {
- val32 = rtl8xxxu_read32(priv, REG_APS_FSMCO);
- if ((val32 & APS_FSMCO_MAC_ENABLE) == 0) {
- ret = 0;
- break;
- }
- udelay(10);
- }
-
- if (!count) {
- ret = -EBUSY;
- goto exit;
- }
-
- /* Enable WL control XTAL setting */
- val8 = rtl8xxxu_read8(priv, REG_AFE_MISC);
- val8 |= AFE_MISC_WL_XTAL_CTRL;
- rtl8xxxu_write8(priv, REG_AFE_MISC, val8);
-
- /* Enable falling edge triggering interrupt */
- val8 = rtl8xxxu_read8(priv, REG_GPIO_INTM + 1);
- val8 |= BIT(1);
- rtl8xxxu_write8(priv, REG_GPIO_INTM + 1, val8);
-
- /* Enable GPIO9 interrupt mode */
- val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2 + 1);
- val8 |= BIT(1);
- rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2 + 1, val8);
-
- /* Enable GPIO9 input mode */
- val8 = rtl8xxxu_read8(priv, REG_GPIO_IO_SEL_2);
- val8 &= ~BIT(1);
- rtl8xxxu_write8(priv, REG_GPIO_IO_SEL_2, val8);
-
- /* Enable HSISR GPIO[C:0] interrupt */
- val8 = rtl8xxxu_read8(priv, REG_HSIMR);
- val8 |= BIT(0);
- rtl8xxxu_write8(priv, REG_HSIMR, val8);
-
- /* Enable HSISR GPIO9 interrupt */
- val8 = rtl8xxxu_read8(priv, REG_HSIMR + 2);
- val8 |= BIT(1);
- rtl8xxxu_write8(priv, REG_HSIMR + 2, val8);
-
- val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL);
- val8 |= MULTI_WIFI_HW_ROF_EN;
- rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL, val8);
-
- /* For GPIO9 internal pull high setting BIT(14) */
- val8 = rtl8xxxu_read8(priv, REG_MULTI_FUNC_CTRL + 1);
- val8 |= BIT(6);
- rtl8xxxu_write8(priv, REG_MULTI_FUNC_CTRL + 1, val8);
-
-exit:
- return ret;
-}
-
static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv)
{
u8 val8;
@@ -5715,7 +3679,7 @@ static int rtl8xxxu_emu_to_disabled(struct rtl8xxxu_priv *priv)
return 0;
}
-static int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv)
+int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv)
{
struct device *dev = &priv->udev->dev;
u32 val32;
@@ -5748,262 +3712,51 @@ static int rtl8xxxu_flush_fifo(struct rtl8xxxu_priv *priv)
return retval;
}
-static int rtl8723au_power_on(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u16 val16;
- u32 val32;
- int ret;
-
- /*
- * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
- */
- rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
-
- rtl8723a_disabled_to_emu(priv);
-
- ret = rtl8723a_emu_to_active(priv);
- if (ret)
- goto exit;
-
- /*
- * 0x0004[19] = 1, reset 8051
- */
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO + 2);
- val8 |= BIT(3);
- rtl8xxxu_write8(priv, REG_APS_FSMCO + 2, val8);
-
- /*
- * Enable MAC DMA/WMAC/SCHEDULE/SEC block
- * Set CR bit10 to enable 32k calibration.
- */
- val16 = rtl8xxxu_read16(priv, REG_CR);
- val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
- CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
- CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
- CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
- CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
- rtl8xxxu_write16(priv, REG_CR, val16);
-
- /* For EFuse PG */
- val32 = rtl8xxxu_read32(priv, REG_EFUSE_CTRL);
- val32 &= ~(BIT(28) | BIT(29) | BIT(30));
- val32 |= (0x06 << 28);
- rtl8xxxu_write32(priv, REG_EFUSE_CTRL, val32);
-exit:
- return ret;
-}
-
-static int rtl8723bu_power_on(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen1_usb_quirks(struct rtl8xxxu_priv *priv)
{
- u8 val8;
- u16 val16;
- u32 val32;
- int ret;
-
- rtl8723a_disabled_to_emu(priv);
-
- ret = rtl8723b_emu_to_active(priv);
- if (ret)
- goto exit;
-
+ /* Fix USB interface interference issue */
+ rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+ rtl8xxxu_write8(priv, 0xfe41, 0x8d);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
/*
- * Enable MAC DMA/WMAC/SCHEDULE/SEC block
- * Set CR bit10 to enable 32k calibration.
+ * This sets TXDMA_OFFSET_DROP_DATA_EN (bit 9) as well as bits
+ * 8 and 5, for which I have found no documentation.
*/
- val16 = rtl8xxxu_read16(priv, REG_CR);
- val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
- CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
- CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
- CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
- CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
- rtl8xxxu_write16(priv, REG_CR, val16);
+ rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
/*
- * BT coexist power on settings. This is identical for 1 and 2
- * antenna parts.
+ * Solve too many protocol errors on the USB bus.
+ * Can't do this for 8188/8192 UMC A cut parts.
*/
- rtl8xxxu_write8(priv, REG_PAD_CTRL1 + 3, 0x20);
-
- val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- val16 |= SYS_FUNC_BBRSTB | SYS_FUNC_BB_GLB_RSTN;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
-
- rtl8xxxu_write8(priv, REG_BT_CONTROL_8723BU + 1, 0x18);
- rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
- /* Antenna inverse */
- rtl8xxxu_write8(priv, 0xfe08, 0x01);
-
- val16 = rtl8xxxu_read16(priv, REG_PWR_DATA);
- val16 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
- rtl8xxxu_write16(priv, REG_PWR_DATA, val16);
-
- val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
- val32 |= LEDCFG0_DPDT_SELECT;
- rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
-
- val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
- val8 &= ~PAD_CTRL1_SW_DPDT_SEL_DATA;
- rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
-exit:
- return ret;
-}
-
-#ifdef CONFIG_RTL8XXXU_UNTESTED
-
-static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u16 val16;
- u32 val32;
- int i;
-
- for (i = 100; i; i--) {
- val8 = rtl8xxxu_read8(priv, REG_APS_FSMCO);
- if (val8 & APS_FSMCO_PFM_ALDN)
- break;
- }
-
- if (!i) {
- pr_info("%s: Poll failed\n", __func__);
- return -ENODEV;
- }
-
- /*
- * RSV_CTRL 0x001C[7:0] = 0x00, unlock ISO/CLK/Power control register
- */
- rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0);
- rtl8xxxu_write8(priv, REG_SPS0_CTRL, 0x2b);
- udelay(100);
-
- val8 = rtl8xxxu_read8(priv, REG_LDOV12D_CTRL);
- if (!(val8 & LDOV12D_ENABLE)) {
- pr_info("%s: Enabling LDOV12D (%02x)\n", __func__, val8);
- val8 |= LDOV12D_ENABLE;
- rtl8xxxu_write8(priv, REG_LDOV12D_CTRL, val8);
-
- udelay(100);
-
- val8 = rtl8xxxu_read8(priv, REG_SYS_ISO_CTRL);
- val8 &= ~SYS_ISO_MD2PP;
- rtl8xxxu_write8(priv, REG_SYS_ISO_CTRL, val8);
- }
-
- /*
- * Auto enable WLAN
- */
- val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
- val16 |= APS_FSMCO_MAC_ENABLE;
- rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
-
- for (i = 1000; i; i--) {
- val16 = rtl8xxxu_read16(priv, REG_APS_FSMCO);
- if (!(val16 & APS_FSMCO_MAC_ENABLE))
- break;
- }
- if (!i) {
- pr_info("%s: FSMCO_MAC_ENABLE poll failed\n", __func__);
- return -EBUSY;
- }
-
- /*
- * Enable radio, GPIO, LED
- */
- val16 = APS_FSMCO_HW_SUSPEND | APS_FSMCO_ENABLE_POWERDOWN |
- APS_FSMCO_PFM_ALDN;
- rtl8xxxu_write16(priv, REG_APS_FSMCO, val16);
-
- /*
- * Release RF digital isolation
- */
- val16 = rtl8xxxu_read16(priv, REG_SYS_ISO_CTRL);
- val16 &= ~SYS_ISO_DIOR;
- rtl8xxxu_write16(priv, REG_SYS_ISO_CTRL, val16);
-
- val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
- val8 &= ~APSD_CTRL_OFF;
- rtl8xxxu_write8(priv, REG_APSD_CTRL, val8);
- for (i = 200; i; i--) {
- val8 = rtl8xxxu_read8(priv, REG_APSD_CTRL);
- if (!(val8 & APSD_CTRL_OFF_STATUS))
- break;
- }
+ if (priv->chip_cut || !priv->vendor_umc) {
+ rtl8xxxu_write8(priv, 0xfe40, 0xe6);
+ rtl8xxxu_write8(priv, 0xfe41, 0x94);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
- if (!i) {
- pr_info("%s: APSD_CTRL poll failed\n", __func__);
- return -EBUSY;
- }
+ rtl8xxxu_write8(priv, 0xfe40, 0xe0);
+ rtl8xxxu_write8(priv, 0xfe41, 0x19);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
- /*
- * Enable MAC DMA/WMAC/SCHEDULE/SEC block
- */
- val16 = rtl8xxxu_read16(priv, REG_CR);
- val16 |= CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
- CR_TXDMA_ENABLE | CR_RXDMA_ENABLE | CR_PROTOCOL_ENABLE |
- CR_SCHEDULE_ENABLE | CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE;
- rtl8xxxu_write16(priv, REG_CR, val16);
+ rtl8xxxu_write8(priv, 0xfe40, 0xe5);
+ rtl8xxxu_write8(priv, 0xfe41, 0x91);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
- /*
- * Workaround for 8188RU LNA power leakage problem.
- */
- if (priv->rtlchip == 0x8188c && priv->hi_pa) {
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
- val32 &= ~BIT(1);
- rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
+ rtl8xxxu_write8(priv, 0xfe40, 0xe2);
+ rtl8xxxu_write8(priv, 0xfe41, 0x81);
+ rtl8xxxu_write8(priv, 0xfe42, 0x80);
}
- return 0;
}
-#endif
-
-static int rtl8192eu_power_on(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen2_usb_quirks(struct rtl8xxxu_priv *priv)
{
- u16 val16;
u32 val32;
- int ret;
-
- ret = 0;
-
- val32 = rtl8xxxu_read32(priv, REG_SYS_CFG);
- if (val32 & SYS_CFG_SPS_LDO_SEL) {
- rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0xc3);
- } else {
- /*
- * Raise 1.2V voltage
- */
- val32 = rtl8xxxu_read32(priv, REG_8192E_LDOV12_CTRL);
- val32 &= 0xff0fffff;
- val32 |= 0x00500000;
- rtl8xxxu_write32(priv, REG_8192E_LDOV12_CTRL, val32);
- rtl8xxxu_write8(priv, REG_LDO_SW_CTRL, 0x83);
- }
-
- rtl8192e_disabled_to_emu(priv);
-
- ret = rtl8192e_emu_to_active(priv);
- if (ret)
- goto exit;
-
- rtl8xxxu_write16(priv, REG_CR, 0x0000);
-
- /*
- * Enable MAC DMA/WMAC/SCHEDULE/SEC block
- * Set CR bit10 to enable 32k calibration.
- */
- val16 = rtl8xxxu_read16(priv, REG_CR);
- val16 |= (CR_HCI_TXDMA_ENABLE | CR_HCI_RXDMA_ENABLE |
- CR_TXDMA_ENABLE | CR_RXDMA_ENABLE |
- CR_PROTOCOL_ENABLE | CR_SCHEDULE_ENABLE |
- CR_MAC_TX_ENABLE | CR_MAC_RX_ENABLE |
- CR_SECURITY_ENABLE | CR_CALTIMER_ENABLE);
- rtl8xxxu_write16(priv, REG_CR, val16);
-exit:
- return ret;
+ val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK);
+ val32 |= TXDMA_OFFSET_DROP_DATA_EN;
+ rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32);
}
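/*
 * rtl8xxxu_gen2_usb_quirks() above is the canonical read-modify-write
 * shape used throughout this file; as a standalone sketch (helper
 * name illustrative):
 */
static void reg32_set_bits_sketch(struct rtl8xxxu_priv *priv,
				  u16 reg, u32 bits)
{
	u32 val32;

	val32 = rtl8xxxu_read32(priv, reg);
	val32 |= bits;
	rtl8xxxu_write32(priv, reg, val32);
}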
-static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
{
u8 val8;
u16 val16;
@@ -6012,7 +3765,7 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
/*
* Workaround for 8188RU LNA power leakage problem.
*/
- if (priv->rtlchip == 0x8188c && priv->hi_pa) {
+ if (priv->rtl_chip == RTL8188R) {
val32 = rtl8xxxu_read32(priv, REG_FPGA0_XCD_RF_PARM);
val32 |= BIT(1);
rtl8xxxu_write32(priv, REG_FPGA0_XCD_RF_PARM, val32);
@@ -6053,40 +3806,6 @@ static void rtl8xxxu_power_off(struct rtl8xxxu_priv *priv)
rtl8xxxu_write8(priv, REG_RSV_CTRL, 0x0e);
}
-static void rtl8723bu_power_off(struct rtl8xxxu_priv *priv)
-{
- u8 val8;
- u16 val16;
-
- rtl8xxxu_flush_fifo(priv);
-
- /*
- * Disable TX report timer
- */
- val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
- val8 &= ~TX_REPORT_CTRL_TIMER_ENABLE;
- rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
-
- rtl8xxxu_write16(priv, REG_CR, 0x0000);
-
- rtl8xxxu_active_to_lps(priv);
-
- /* Reset Firmware if running in RAM */
- if (rtl8xxxu_read8(priv, REG_MCU_FW_DL) & MCU_FW_RAM_SEL)
- rtl8xxxu_firmware_self_reset(priv);
-
- /* Reset MCU */
- val16 = rtl8xxxu_read16(priv, REG_SYS_FUNC);
- val16 &= ~SYS_FUNC_CPU_ENABLE;
- rtl8xxxu_write16(priv, REG_SYS_FUNC, val16);
-
- /* Reset MCU ready status */
- rtl8xxxu_write8(priv, REG_MCU_FW_DL, 0x00);
-
- rtl8723bu_active_to_emu(priv);
- rtl8xxxu_emu_to_disabled(priv);
-}
-
#ifdef NEED_PS_TDMA
static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
u8 arg1, u8 arg2, u8 arg3, u8 arg4, u8 arg5)
@@ -6100,175 +3819,77 @@ static void rtl8723bu_set_ps_tdma(struct rtl8xxxu_priv *priv,
h2c.b_type_dma.data3 = arg3;
h2c.b_type_dma.data4 = arg4;
h2c.b_type_dma.data5 = arg5;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma));
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_type_dma));
}
#endif
-static void rtl8723b_enable_rf(struct rtl8xxxu_priv *priv)
+void rtl8xxxu_gen2_disable_rf(struct rtl8xxxu_priv *priv)
{
- struct h2c_cmd h2c;
u32 val32;
- u8 val8;
-
- /*
- * No indication anywhere as to what 0x0790 does. The 2 antenna
- * vendor code preserves bits 6-7 here.
- */
- rtl8xxxu_write8(priv, 0x0790, 0x05);
- /*
- * 0x0778 seems to be related to enabling the number of antennas
- * In the vendor driver halbtc8723b2ant_InitHwConfig() sets it
- * to 0x03, while halbtc8723b1ant_InitHwConfig() sets it to 0x01
- */
- rtl8xxxu_write8(priv, 0x0778, 0x01);
-
- val8 = rtl8xxxu_read8(priv, REG_GPIO_MUXCFG);
- val8 |= BIT(5);
- rtl8xxxu_write8(priv, REG_GPIO_MUXCFG, val8);
-
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_IQADJ_G1, 0x780);
- rtl8723bu_write_btreg(priv, 0x3c, 0x15); /* BT TRx Mask on */
-
- /*
- * Set BT grant to low
- */
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.bt_grant.cmd = H2C_8723B_BT_GRANT;
- h2c.bt_grant.data = 0;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_grant));
-
- /*
- * WLAN action by PTA
- */
- rtl8xxxu_write8(priv, REG_WLAN_ACT_CONTROL_8723B, 0x04);
-
- /*
- * BT select S0/S1 controlled by WiFi
- */
- val8 = rtl8xxxu_read8(priv, 0x0067);
- val8 |= BIT(5);
- rtl8xxxu_write8(priv, 0x0067, val8);
-
- val32 = rtl8xxxu_read32(priv, REG_PWR_DATA);
- val32 |= PWR_DATA_EEPRPAD_RFE_CTRL_EN;
- rtl8xxxu_write32(priv, REG_PWR_DATA, val32);
-
- /*
- * Bits 6/7 are marked in/out ... but for what?
- */
- rtl8xxxu_write8(priv, 0x0974, 0xff);
-
- val32 = rtl8xxxu_read32(priv, REG_RFE_BUFFER);
- val32 |= (BIT(0) | BIT(1));
- rtl8xxxu_write32(priv, REG_RFE_BUFFER, val32);
-
- rtl8xxxu_write8(priv, REG_RFE_CTRL_ANTA_SRC, 0x77);
-
- val32 = rtl8xxxu_read32(priv, REG_LEDCFG0);
- val32 &= ~BIT(24);
- val32 |= BIT(23);
- rtl8xxxu_write32(priv, REG_LEDCFG0, val32);
-
- /*
- * Fix external switch Main->S1, Aux->S0
- */
- val8 = rtl8xxxu_read8(priv, REG_PAD_CTRL1);
- val8 &= ~BIT(0);
- rtl8xxxu_write8(priv, REG_PAD_CTRL1, val8);
+ val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
+ val32 &= ~(BIT(22) | BIT(23));
+ rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
+}
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.ant_sel_rsv.cmd = H2C_8723B_ANT_SEL_RSV;
- h2c.ant_sel_rsv.ant_inverse = 1;
- h2c.ant_sel_rsv.int_switch_type = 0;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ant_sel_rsv));
+static void rtl8xxxu_old_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
+{
+ u8 val8;
+ u32 val32;
- /*
- * 0x280, 0x00, 0x200, 0x80 - not clear
- */
- rtl8xxxu_write32(priv, REG_S0S1_PATH_SWITCH, 0x00);
+ if (priv->ep_tx_normal_queue)
+ val8 = TX_PAGE_NUM_NORM_PQ;
+ else
+ val8 = 0;
- /*
- * Software control, antenna at WiFi side
- */
-#ifdef NEED_PS_TDMA
- rtl8723bu_set_ps_tdma(priv, 0x08, 0x00, 0x00, 0x00, 0x00);
-#endif
+ rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
- rtl8xxxu_write32(priv, REG_BT_COEX_TABLE1, 0x55555555);
- rtl8xxxu_write32(priv, REG_BT_COEX_TABLE2, 0x55555555);
- rtl8xxxu_write32(priv, REG_BT_COEX_TABLE3, 0x00ffffff);
- rtl8xxxu_write8(priv, REG_BT_COEX_TABLE4, 0x03);
+ val32 = (TX_PAGE_NUM_PUBQ << RQPN_PUB_PQ_SHIFT) | RQPN_LOAD;
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.bt_info.cmd = H2C_8723B_BT_INFO;
- h2c.bt_info.data = BIT(0);
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.bt_info));
+ if (priv->ep_tx_high_queue)
+ val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
+ if (priv->ep_tx_low_queue)
+ val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
- memset(&h2c, 0, sizeof(struct h2c_cmd));
- h2c.ignore_wlan.cmd = H2C_8723B_BT_IGNORE_WLANACT;
- h2c.ignore_wlan.data = 0;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ignore_wlan));
+ rtl8xxxu_write32(priv, REG_RQPN, val32);
}
-static void rtl8723b_disable_rf(struct rtl8xxxu_priv *priv)
+static void rtl8xxxu_init_queue_reserved_page(struct rtl8xxxu_priv *priv)
{
+ struct rtl8xxxu_fileops *fops = priv->fops;
+ u32 hq, lq, nq, eq, pubq;
u32 val32;
- rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
-
- val32 = rtl8xxxu_read32(priv, REG_RX_WAIT_CCA);
- val32 &= ~(BIT(22) | BIT(23));
- rtl8xxxu_write32(priv, REG_RX_WAIT_CCA, val32);
-}
-
-static void rtl8723bu_init_aggregation(struct rtl8xxxu_priv *priv)
-{
- u32 agg_rx;
- u8 agg_ctrl;
+ hq = 0;
+ lq = 0;
+ nq = 0;
+ eq = 0;
+ pubq = 0;
- /*
- * For now simply disable RX aggregation
- */
- agg_ctrl = rtl8xxxu_read8(priv, REG_TRXDMA_CTRL);
- agg_ctrl &= ~TRXDMA_CTRL_RXDMA_AGG_EN;
+ if (priv->ep_tx_high_queue)
+ hq = fops->page_num_hi;
+ if (priv->ep_tx_low_queue)
+ lq = fops->page_num_lo;
+ if (priv->ep_tx_normal_queue)
+ nq = fops->page_num_norm;
- agg_rx = rtl8xxxu_read32(priv, REG_RXDMA_AGG_PG_TH);
- agg_rx &= ~RXDMA_USB_AGG_ENABLE;
- agg_rx &= ~0xff0f;
+ val32 = (nq << RQPN_NPQ_SHIFT) | (eq << RQPN_EPQ_SHIFT);
+ rtl8xxxu_write32(priv, REG_RQPN_NPQ, val32);
- rtl8xxxu_write8(priv, REG_TRXDMA_CTRL, agg_ctrl);
- rtl8xxxu_write32(priv, REG_RXDMA_AGG_PG_TH, agg_rx);
-}
+ pubq = fops->total_page_num - hq - lq - nq;
-static void rtl8723bu_init_statistics(struct rtl8xxxu_priv *priv)
-{
- u32 val32;
+ val32 = RQPN_LOAD;
+ val32 |= (hq << RQPN_HI_PQ_SHIFT);
+ val32 |= (lq << RQPN_LO_PQ_SHIFT);
+ val32 |= (pubq << RQPN_PUB_PQ_SHIFT);
- /* Time duration for NHM unit: 4us, 0x2710=40ms */
- rtl8xxxu_write16(priv, REG_NHM_TIMER_8723B + 2, 0x2710);
- rtl8xxxu_write16(priv, REG_NHM_TH9_TH10_8723B + 2, 0xffff);
- rtl8xxxu_write32(priv, REG_NHM_TH3_TO_TH0_8723B, 0xffffff52);
- rtl8xxxu_write32(priv, REG_NHM_TH7_TO_TH4_8723B, 0xffffffff);
- /* TH8 */
- val32 = rtl8xxxu_read32(priv, REG_FPGA0_IQK);
- val32 |= 0xff;
- rtl8xxxu_write32(priv, REG_FPGA0_IQK, val32);
- /* Enable CCK */
- val32 = rtl8xxxu_read32(priv, REG_NHM_TH9_TH10_8723B);
- val32 |= BIT(8) | BIT(9) | BIT(10);
- rtl8xxxu_write32(priv, REG_NHM_TH9_TH10_8723B, val32);
- /* Max power amongst all RX antennas */
- val32 = rtl8xxxu_read32(priv, REG_OFDM0_FA_RSTC);
- val32 |= BIT(7);
- rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
+ rtl8xxxu_write32(priv, REG_RQPN, val32);
}
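
The rewritten helper above budgets the TX buffer pages per queue and hands whatever is left to the public pool; the normal/extra queue counts go to REG_RQPN_NPQ while the high/low/public counts are latched into REG_RQPN with RQPN_LOAD. A minimal sketch of the packing, assuming only the RQPN_*_SHIFT values defined in rtl8xxxu_regs.h (the per-chip page counts really come from the fileops table):

/*
 * Illustrative only: pack the reserved-page budget for REG_RQPN.
 * hq/lq/nq are the high/low/normal queue reservations; the public
 * pool gets the remainder. nq itself is written to REG_RQPN_NPQ
 * separately, as in rtl8xxxu_init_queue_reserved_page() above.
 */
static u32 rqpn_pack_sketch(u32 total, u32 hq, u32 lq, u32 nq)
{
	u32 pubq = total - hq - lq - nq;

	return RQPN_LOAD |
	       (hq << RQPN_HI_PQ_SHIFT) |
	       (lq << RQPN_LO_PQ_SHIFT) |
	       (pubq << RQPN_PUB_PQ_SHIFT);
}
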
static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
{
struct rtl8xxxu_priv *priv = hw->priv;
struct device *dev = &priv->udev->dev;
- struct rtl8xxxu_rfregval *rftable;
bool macpower;
int ret;
u8 val8;
@@ -6293,33 +3914,22 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
goto exit;
}
- dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
if (!macpower) {
- ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
- if (ret) {
- dev_warn(dev, "%s: LLT table init failed\n", __func__);
- goto exit;
- }
+ if (priv->fops->total_page_num)
+ rtl8xxxu_init_queue_reserved_page(priv);
+ else
+ rtl8xxxu_old_init_queue_reserved_page(priv);
+ }
- /*
- * Presumably this is for 8188EU as well
- * Enable TX report and TX report timer
- */
- if (priv->rtlchip == 0x8723bu) {
- val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
- val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
- rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
- /* Set MAX RPT MACID */
- rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02);
- /* TX report Timer. Unit: 32us */
- rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0);
+ ret = rtl8xxxu_init_queue_priority(priv);
+ dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
+ if (ret)
+ goto exit;
- /* tmp ps ? */
- val8 = rtl8xxxu_read8(priv, 0xa3);
- val8 &= 0xf8;
- rtl8xxxu_write8(priv, 0xa3, val8);
- }
- }
+ /*
+ * Set RX page boundary
+ */
+ rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, priv->fops->trxff_boundary);
ret = rtl8xxxu_download_firmware(priv);
dev_dbg(dev, "%s: download_fiwmare %i\n", __func__, ret);
@@ -6330,41 +3940,10 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (ret)
goto exit;
- /* Solve too many protocol error on USB bus */
- /* Can't do this for 8188/8192 UMC A cut parts */
- if (priv->rtlchip == 0x8723a ||
- ((priv->rtlchip == 0x8192c || priv->rtlchip == 0x8191c ||
- priv->rtlchip == 0x8188c) &&
- (priv->chip_cut || !priv->vendor_umc))) {
- rtl8xxxu_write8(priv, 0xfe40, 0xe6);
- rtl8xxxu_write8(priv, 0xfe41, 0x94);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
- rtl8xxxu_write8(priv, 0xfe40, 0xe0);
- rtl8xxxu_write8(priv, 0xfe41, 0x19);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
- rtl8xxxu_write8(priv, 0xfe40, 0xe5);
- rtl8xxxu_write8(priv, 0xfe41, 0x91);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
-
- rtl8xxxu_write8(priv, 0xfe40, 0xe2);
- rtl8xxxu_write8(priv, 0xfe41, 0x81);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
- }
-
- if (priv->rtlchip == 0x8192e) {
- rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
- rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
- }
-
if (priv->fops->phy_init_antenna_selection)
priv->fops->phy_init_antenna_selection(priv);
- if (priv->rtlchip == 0x8723b)
- ret = rtl8xxxu_init_mac(priv, rtl8723b_mac_init_table);
- else
- ret = rtl8xxxu_init_mac(priv, rtl8723a_mac_init_table);
+ ret = rtl8xxxu_init_mac(priv);
dev_dbg(dev, "%s: init_mac %i\n", __func__, ret);
if (ret)
@@ -6375,92 +3954,37 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (ret)
goto exit;
- switch(priv->rtlchip) {
- case 0x8723a:
- rftable = rtl8723au_radioa_1t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
- break;
- case 0x8723b:
- rftable = rtl8723bu_radioa_1t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
- /*
- * PHY LCK
- */
- rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdfbe0);
- rtl8xxxu_write_rfreg(priv, RF_A, RF6052_REG_MODE_AG, 0x8c01);
- msleep(200);
- rtl8xxxu_write_rfreg(priv, RF_A, 0xb0, 0xdffe0);
- break;
- case 0x8188c:
- if (priv->hi_pa)
- rftable = rtl8188ru_radioa_1t_highpa_table;
- else
- rftable = rtl8192cu_radioa_1t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
- break;
- case 0x8191c:
- rftable = rtl8192cu_radioa_1t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
- break;
- case 0x8192c:
- rftable = rtl8192cu_radioa_2t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_A);
- if (ret)
- break;
- rftable = rtl8192cu_radiob_2t_init_table;
- ret = rtl8xxxu_init_phy_rf(priv, rftable, RF_B);
- break;
- default:
- ret = -EINVAL;
- }
-
+ ret = priv->fops->init_phy_rf(priv);
if (ret)
goto exit;
- /*
- * Chip specific quirks
- */
- if (priv->rtlchip == 0x8723a) {
- /* Fix USB interface interference issue */
- rtl8xxxu_write8(priv, 0xfe40, 0xe0);
- rtl8xxxu_write8(priv, 0xfe41, 0x8d);
- rtl8xxxu_write8(priv, 0xfe42, 0x80);
- rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, 0xfd0320);
+ /* RFSW Control - clear bit 14 ?? */
+ if (priv->rtl_chip != RTL8723B && priv->rtl_chip != RTL8192E)
+ rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
- /* Reduce 80M spur */
- rtl8xxxu_write32(priv, REG_AFE_XTAL_CTRL, 0x0381808d);
- rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
- rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff82);
- rtl8xxxu_write32(priv, REG_AFE_PLL_CTRL, 0xf0ffff83);
- } else {
- val32 = rtl8xxxu_read32(priv, REG_TXDMA_OFFSET_CHK);
- val32 |= TXDMA_OFFSET_DROP_DATA_EN;
- rtl8xxxu_write32(priv, REG_TXDMA_OFFSET_CHK, val32);
+ val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
+ FPGA0_RF_ANTSWB |
+ ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB) << FPGA0_RF_BD_CTRL_SHIFT);
+ if (!priv->no_pape) {
+ val32 |= (FPGA0_RF_PAPE |
+ (FPGA0_RF_PAPE << FPGA0_RF_BD_CTRL_SHIFT));
}
+ rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
- if (!macpower) {
- if (priv->ep_tx_normal_queue)
- val8 = TX_PAGE_NUM_NORM_PQ;
- else
- val8 = 0;
-
- rtl8xxxu_write8(priv, REG_RQPN_NPQ, val8);
-
- val32 = (TX_PAGE_NUM_PUBQ << RQPN_NORM_PQ_SHIFT) | RQPN_LOAD;
-
- if (priv->ep_tx_high_queue)
- val32 |= (TX_PAGE_NUM_HI_PQ << RQPN_HI_PQ_SHIFT);
- if (priv->ep_tx_low_queue)
- val32 |= (TX_PAGE_NUM_LO_PQ << RQPN_LO_PQ_SHIFT);
-
- rtl8xxxu_write32(priv, REG_RQPN, val32);
+ /* 0x860[6:5]= 00 - why? - this sets antenna B */
+ if (priv->rtl_chip != RTL8192E)
+ rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66f60210);
+ if (!macpower) {
/*
* Set TX buffer boundary
*/
- val8 = TX_TOTAL_PAGE_NUM + 1;
+ if (priv->rtl_chip == RTL8192E)
+ val8 = TX_TOTAL_PAGE_NUM_8192E + 1;
+ else
+ val8 = TX_TOTAL_PAGE_NUM + 1;
- if (priv->rtlchip == 0x8723b)
+ if (priv->rtl_chip == RTL8723B)
val8 -= 1;
rtl8xxxu_write8(priv, REG_TXPKTBUF_BCNQ_BDNY, val8);
@@ -6470,54 +3994,63 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
rtl8xxxu_write8(priv, REG_TDECTRL + 1, val8);
}
- ret = rtl8xxxu_init_queue_priority(priv);
- dev_dbg(dev, "%s: init_queue_priority %i\n", __func__, ret);
- if (ret)
- goto exit;
+ /*
+	 * The vendor drivers set PBP for all devices except the 8192e.
+ * There is no explanation for this in any of the sources.
+ */
+ val8 = (priv->fops->pbp_rx << PBP_PAGE_SIZE_RX_SHIFT) |
+ (priv->fops->pbp_tx << PBP_PAGE_SIZE_TX_SHIFT);
+ if (priv->rtl_chip != RTL8192E)
+ rtl8xxxu_write8(priv, REG_PBP, val8);
- /* RFSW Control - clear bit 14 ?? */
- if (priv->rtlchip != 0x8723b)
- rtl8xxxu_write32(priv, REG_FPGA0_TX_INFO, 0x00000003);
- /* 0x07000760 */
- val32 = FPGA0_RF_TRSW | FPGA0_RF_TRSWB | FPGA0_RF_ANTSW |
- FPGA0_RF_ANTSWB | FPGA0_RF_PAPE |
- ((FPGA0_RF_ANTSW | FPGA0_RF_ANTSWB | FPGA0_RF_PAPE) <<
- FPGA0_RF_BD_CTRL_SHIFT);
- rtl8xxxu_write32(priv, REG_FPGA0_XAB_RF_SW_CTRL, val32);
- /* 0x860[6:5]= 00 - why? - this sets antenna B */
- rtl8xxxu_write32(priv, REG_FPGA0_XA_RF_INT_OE, 0x66F60210);
+ dev_dbg(dev, "%s: macpower %i\n", __func__, macpower);
+ if (!macpower) {
+ ret = priv->fops->llt_init(priv, TX_TOTAL_PAGE_NUM);
+ if (ret) {
+ dev_warn(dev, "%s: LLT table init failed\n", __func__);
+ goto exit;
+ }
- priv->rf_mode_ag[0] = rtl8xxxu_read_rfreg(priv, RF_A,
- RF6052_REG_MODE_AG);
+ /*
+ * Chip specific quirks
+ */
+ priv->fops->usb_quirks(priv);
- /*
- * Set RX page boundary
- */
- if (priv->rtlchip == 0x8723b)
- rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x3f7f);
- else
- rtl8xxxu_write16(priv, REG_TRXFF_BNDY + 2, 0x27ff);
- /*
- * Transfer page size is always 128
- */
- if (priv->rtlchip == 0x8723b)
- val8 = (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_RX_SHIFT) |
- (PBP_PAGE_SIZE_256 << PBP_PAGE_SIZE_TX_SHIFT);
- else
- val8 = (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_RX_SHIFT) |
- (PBP_PAGE_SIZE_128 << PBP_PAGE_SIZE_TX_SHIFT);
- rtl8xxxu_write8(priv, REG_PBP, val8);
+ /*
+	 * Presumably this applies to the 8188EU as well.
+	 * Enable the TX report and the TX report timer.
+ */
+ if (priv->rtl_chip == RTL8723B) {
+ val8 = rtl8xxxu_read8(priv, REG_TX_REPORT_CTRL);
+ val8 |= TX_REPORT_CTRL_TIMER_ENABLE;
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL, val8);
+ /* Set MAX RPT MACID */
+ rtl8xxxu_write8(priv, REG_TX_REPORT_CTRL + 1, 0x02);
+ /* TX report Timer. Unit: 32us */
+ rtl8xxxu_write16(priv, REG_TX_REPORT_TIME, 0xcdf0);
+
+ /* tmp ps ? */
+ val8 = rtl8xxxu_read8(priv, 0xa3);
+ val8 &= 0xf8;
+ rtl8xxxu_write8(priv, 0xa3, val8);
+ }
+ }
/*
	 * Unit is 8 bytes; not obvious what it is used for
*/
rtl8xxxu_write8(priv, REG_RX_DRVINFO_SZ, 4);
- /*
- * Enable all interrupts - not obvious USB needs to do this
- */
- rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
- rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+ if (priv->rtl_chip == RTL8192E) {
+ rtl8xxxu_write32(priv, REG_HIMR0, 0x00);
+ rtl8xxxu_write32(priv, REG_HIMR1, 0x00);
+ } else {
+ /*
+		 * Enable all interrupts - it is not obvious why a USB device needs this
+ */
+ rtl8xxxu_write32(priv, REG_HISR, 0xffffffff);
+ rtl8xxxu_write32(priv, REG_HIMR, 0xffffffff);
+ }
rtl8xxxu_set_mac(priv);
rtl8xxxu_set_linktype(priv, NL80211_IFTYPE_STATION);
@@ -6592,7 +4125,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* Initialize burst parameters
*/
- if (priv->rtlchip == 0x8723b) {
+ if (priv->rtl_chip == RTL8723B) {
/*
* For USB high speed set 512B packets
*/
@@ -6643,9 +4176,11 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
priv->fops->set_tx_power(priv, 1, false);
/* Let the 8051 take control of antenna setting */
- val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
- val8 |= LEDCFG2_DPDT_SELECT;
- rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+ if (priv->rtl_chip != RTL8192E) {
+ val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
+ val8 |= LEDCFG2_DPDT_SELECT;
+ rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
+ }
rtl8xxxu_write8(priv, REG_HWSEQ_CTRL, 0xff);
@@ -6657,6 +4192,20 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
if (priv->fops->init_statistics)
priv->fops->init_statistics(priv);
+ if (priv->rtl_chip == RTL8192E) {
+ /*
+ * 0x4c6[3] 1: RTS BW = Data BW
+ * 0: RTS BW depends on CCA / secondary CCA result.
+ */
+ val8 = rtl8xxxu_read8(priv, REG_QUEUE_CTRL);
+ val8 &= ~BIT(3);
+ rtl8xxxu_write8(priv, REG_QUEUE_CTRL, val8);
+ /*
+ * Reset USB mode switch setting
+ */
+ rtl8xxxu_write8(priv, REG_ACLK_MON, 0x00);
+ }
+
rtl8723a_phy_lc_calibrate(priv);
priv->fops->phy_iq_calibrate(priv);
@@ -6664,7 +4213,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/*
* This should enable thermal meter
*/
- if (priv->fops->has_s0s1)
+ if (priv->fops->tx_desc_size == sizeof(struct rtl8xxxu_txdesc40))
rtl8xxxu_write_rfreg(priv,
RF_A, RF6052_REG_T_METER_8723B, 0x37cf8);
else
@@ -6674,7 +4223,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
val8 = ((30000 + NAV_UPPER_UNIT - 1) / NAV_UPPER_UNIT);
rtl8xxxu_write8(priv, REG_NAV_UPPER, val8);
- if (priv->rtlchip == 0x8723a) {
+ if (priv->rtl_chip == RTL8723A) {
/*
* 2011/03/09 MH debug only, UMC-B cut pass 2500 S5 test,
	 * but we still need to find the root cause.
@@ -6685,6 +4234,8 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
val32 |= FPGA_RF_MODE_CCK;
rtl8xxxu_write32(priv, REG_FPGA0_RF_MODE, val32);
}
+ } else if (priv->rtl_chip == RTL8192E) {
+ rtl8xxxu_write8(priv, REG_USB_HRPWM, 0x00);
}
val32 = rtl8xxxu_read32(priv, REG_FWHW_TXQ_CTRL);
@@ -6692,17 +4243,20 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/* ack for xmit mgmt frames. */
rtl8xxxu_write32(priv, REG_FWHW_TXQ_CTRL, val32);
+ if (priv->rtl_chip == RTL8192E) {
+ /*
+ * Fix LDPC rx hang issue.
+ */
+ val32 = rtl8xxxu_read32(priv, REG_AFE_MISC);
+ rtl8xxxu_write8(priv, REG_8192E_LDOV12_CTRL, 0x75);
+ val32 &= 0xfff00fff;
+ val32 |= 0x0007e000;
+ rtl8xxxu_write32(priv, REG_AFE_MISC, val32);
+ }
exit:
return ret;
}
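
With the chip-specific blocks removed, rtl8xxxu_init_device() reduces to a generic sequence that dispatches through priv->fops at each hardware-dependent step. A condensed, illustrative outline of that order (not the literal function, which also handles macpower, firmware download, and calibration):

/*
 * Condensed sketch of the init order after this refactoring; error
 * handling and the macpower-only steps are elided.
 */
static int init_device_outline(struct rtl8xxxu_priv *priv)
{
	int ret;

	if (priv->fops->phy_init_antenna_selection)
		priv->fops->phy_init_antenna_selection(priv);

	ret = rtl8xxxu_init_mac(priv);		/* shared MAC init table */
	if (ret)
		return ret;

	ret = priv->fops->init_phy_rf(priv);	/* per-chip RF tables */
	if (ret)
		return ret;

	priv->fops->usb_quirks(priv);		/* per-chip USB quirks */
	return 0;
}
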
-static void rtl8xxxu_disable_device(struct ieee80211_hw *hw)
-{
- struct rtl8xxxu_priv *priv = hw->priv;
-
- priv->fops->power_off(priv);
-}
-
static void rtl8xxxu_cam_write(struct rtl8xxxu_priv *priv,
struct ieee80211_key_conf *key, const u8 *mac)
{
@@ -6767,8 +4321,7 @@ static void rtl8xxxu_sw_scan_complete(struct ieee80211_hw *hw,
rtl8xxxu_write8(priv, REG_BEACON_CTRL, val8);
}
-static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi)
+void rtl8xxxu_update_rate_mask(struct rtl8xxxu_priv *priv, u32 ramask, int sgi)
{
struct h2c_cmd h2c;
@@ -6784,11 +4337,11 @@ static void rtl8723au_update_rate_mask(struct rtl8xxxu_priv *priv,
	dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zu\n",
__func__, ramask, h2c.ramask.arg, sizeof(h2c.ramask));
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.ramask));
+ rtl8xxxu_gen1_h2c_cmd(priv, &h2c, sizeof(h2c.ramask));
}
-static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv,
- u32 ramask, int sgi)
+void rtl8xxxu_gen2_update_rate_mask(struct rtl8xxxu_priv *priv,
+ u32 ramask, int sgi)
{
struct h2c_cmd h2c;
u8 bw = 0;
@@ -6810,11 +4363,11 @@ static void rtl8723bu_update_rate_mask(struct rtl8xxxu_priv *priv,
	dev_dbg(&priv->udev->dev, "%s: rate mask %08x, arg %02x, size %zu\n",
__func__, ramask, h2c.ramask.arg, sizeof(h2c.b_macid_cfg));
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.b_macid_cfg));
}
-static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv,
- u8 macid, bool connect)
+void rtl8xxxu_gen1_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect)
{
struct h2c_cmd h2c;
@@ -6827,11 +4380,11 @@ static void rtl8723au_report_connect(struct rtl8xxxu_priv *priv,
else
h2c.joinbss.data = H2C_JOIN_BSS_DISCONNECT;
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss));
+ rtl8xxxu_gen1_h2c_cmd(priv, &h2c, sizeof(h2c.joinbss));
}
-static void rtl8723bu_report_connect(struct rtl8xxxu_priv *priv,
- u8 macid, bool connect)
+void rtl8xxxu_gen2_report_connect(struct rtl8xxxu_priv *priv,
+ u8 macid, bool connect)
{
struct h2c_cmd h2c;
@@ -6843,7 +4396,7 @@ static void rtl8723bu_report_connect(struct rtl8xxxu_priv *priv,
else
h2c.media_status_rpt.parm &= ~BIT(0);
- rtl8723a_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
+ rtl8xxxu_gen2_h2c_cmd(priv, &h2c, sizeof(h2c.media_status_rpt));
}
static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
@@ -6913,7 +4466,7 @@ rtl8xxxu_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
rtl8xxxu_write8(priv, REG_BCN_MAX_ERR, 0xff);
- rtl8723a_stop_tx_beacon(priv);
+ rtl8xxxu_stop_tx_beacon(priv);
/* joinbss sequence */
rtl8xxxu_write16(priv, REG_BCN_PSR_RPT,
@@ -7006,7 +4559,7 @@ static u32 rtl8xxxu_queue_select(struct ieee80211_hw *hw, struct sk_buff *skb)
* format. The descriptor checksum is still only calculated over the
* initial 32 bytes of the descriptor!
*/
-static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc)
+static void rtl8xxxu_calc_tx_desc_csum(struct rtl8xxxu_txdesc32 *tx_desc)
{
__le16 *ptr = (__le16 *)tx_desc;
u16 csum = 0;
@@ -7018,7 +4571,7 @@ static void rtl8xxxu_calc_tx_desc_csum(struct rtl8723au_tx_desc *tx_desc)
*/
tx_desc->csum = cpu_to_le16(0);
- for (i = 0; i < (sizeof(struct rtl8723au_tx_desc) / sizeof(u16)); i++)
+ for (i = 0; i < (sizeof(struct rtl8xxxu_txdesc32) / sizeof(u16)); i++)
csum = csum ^ le16_to_cpu(ptr[i]);
tx_desc->csum |= cpu_to_le16(csum);
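
The checksum is a plain XOR over the descriptor read as 16-bit little-endian words, with the csum field cleared first; as the comment above notes, even the 40-byte descriptors are only summed over the initial 32 bytes. Factored out as a pure helper for illustration:

/*
 * Illustrative helper equivalent to the loop above: XOR the first
 * 32 bytes (sizeof(struct rtl8xxxu_txdesc32)) as little-endian
 * 16-bit words. Caller must have zeroed tx_desc->csum beforehand.
 */
static u16 txdesc_csum_sketch(const struct rtl8xxxu_txdesc32 *tx_desc)
{
	const __le16 *ptr = (const __le16 *)tx_desc;
	u16 csum = 0;
	int i;

	for (i = 0; i < sizeof(struct rtl8xxxu_txdesc32) / sizeof(u16); i++)
		csum ^= le16_to_cpu(ptr[i]);

	return csum;
}
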
@@ -7156,8 +4709,8 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ieee80211_rate *tx_rate = ieee80211_get_tx_rate(hw, tx_info);
struct rtl8xxxu_priv *priv = hw->priv;
- struct rtl8723au_tx_desc *tx_desc;
- struct rtl8723bu_tx_desc *tx_desc40;
+ struct rtl8xxxu_txdesc32 *tx_desc;
+ struct rtl8xxxu_txdesc40 *tx_desc40;
struct rtl8xxxu_tx_urb *tx_urb;
struct ieee80211_sta *sta = NULL;
struct ieee80211_vif *vif = tx_info->control.vif;
@@ -7202,7 +4755,7 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
if (control && control->sta)
sta = control->sta;
- tx_desc = (struct rtl8723au_tx_desc *)skb_push(skb, tx_desc_size);
+ tx_desc = (struct rtl8xxxu_txdesc32 *)skb_push(skb, tx_desc_size);
memset(tx_desc, 0, tx_desc_size);
tx_desc->pkt_size = cpu_to_le16(pktlen);
@@ -7259,37 +4812,35 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
tx_desc->txdw5 |= cpu_to_le32(0x0001ff00);
tx_desc->txdw3 =
- cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723A);
+ cpu_to_le32((u32)seq_number << TXDESC32_SEQ_SHIFT);
if (ampdu_enable)
- tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_ENABLE_8723A);
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_ENABLE);
else
- tx_desc->txdw1 |= cpu_to_le32(TXDESC_AGG_BREAK_8723A);
+ tx_desc->txdw1 |= cpu_to_le32(TXDESC32_AGG_BREAK);
if (ieee80211_is_mgmt(hdr->frame_control)) {
tx_desc->txdw5 = cpu_to_le32(tx_rate->hw_value);
tx_desc->txdw4 |=
- cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723A);
+ cpu_to_le32(TXDESC32_USE_DRIVER_RATE);
tx_desc->txdw5 |=
- cpu_to_le32(6 <<
- TXDESC_RETRY_LIMIT_SHIFT_8723A);
+ cpu_to_le32(6 << TXDESC32_RETRY_LIMIT_SHIFT);
tx_desc->txdw5 |=
- cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723A);
+ cpu_to_le32(TXDESC32_RETRY_LIMIT_ENABLE);
}
if (ieee80211_is_data_qos(hdr->frame_control))
- tx_desc->txdw4 |= cpu_to_le32(TXDESC_QOS_8723A);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_QOS);
if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
(sta && vif && vif->bss_conf.use_short_preamble))
- tx_desc->txdw4 |=
- cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723A);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_SHORT_PREAMBLE);
if (rate_flag & IEEE80211_TX_RC_SHORT_GI ||
(ieee80211_is_data_qos(hdr->frame_control) &&
sta && sta->ht_cap.cap &
(IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_SGI_20))) {
- tx_desc->txdw5 |= cpu_to_le32(TXDESC_SHORT_GI);
+ tx_desc->txdw5 |= cpu_to_le32(TXDESC32_SHORT_GI);
}
if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
@@ -7299,46 +4850,43 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
*/
tx_desc->txdw4 |=
cpu_to_le32(DESC_RATE_24M <<
- TXDESC_RTS_RATE_SHIFT_8723A);
+ TXDESC32_RTS_RATE_SHIFT);
tx_desc->txdw4 |=
- cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723A);
- tx_desc->txdw4 |=
- cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723A);
+ cpu_to_le32(TXDESC32_RTS_CTS_ENABLE);
+ tx_desc->txdw4 |= cpu_to_le32(TXDESC32_HW_RTS_ENABLE);
}
} else {
- tx_desc40 = (struct rtl8723bu_tx_desc *)tx_desc;
+ tx_desc40 = (struct rtl8xxxu_txdesc40 *)tx_desc;
tx_desc40->txdw4 = cpu_to_le32(rate);
if (ieee80211_is_data(hdr->frame_control)) {
			tx_desc40->txdw4 |=
cpu_to_le32(0x1f <<
- TXDESC_DATA_RATE_FB_SHIFT_8723B);
+ TXDESC40_DATA_RATE_FB_SHIFT);
}
tx_desc40->txdw9 =
- cpu_to_le32((u32)seq_number << TXDESC_SEQ_SHIFT_8723B);
+ cpu_to_le32((u32)seq_number << TXDESC40_SEQ_SHIFT);
if (ampdu_enable)
- tx_desc40->txdw2 |=
- cpu_to_le32(TXDESC_AGG_ENABLE_8723B);
+ tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_ENABLE);
else
- tx_desc40->txdw2 |= cpu_to_le32(TXDESC_AGG_BREAK_8723B);
+ tx_desc40->txdw2 |= cpu_to_le32(TXDESC40_AGG_BREAK);
if (ieee80211_is_mgmt(hdr->frame_control)) {
tx_desc40->txdw4 = cpu_to_le32(tx_rate->hw_value);
tx_desc40->txdw3 |=
- cpu_to_le32(TXDESC_USE_DRIVER_RATE_8723B);
+ cpu_to_le32(TXDESC40_USE_DRIVER_RATE);
tx_desc40->txdw4 |=
- cpu_to_le32(6 <<
- TXDESC_RETRY_LIMIT_SHIFT_8723B);
+ cpu_to_le32(6 << TXDESC40_RETRY_LIMIT_SHIFT);
tx_desc40->txdw4 |=
- cpu_to_le32(TXDESC_RETRY_LIMIT_ENABLE_8723B);
+ cpu_to_le32(TXDESC40_RETRY_LIMIT_ENABLE);
}
if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE ||
(sta && vif && vif->bss_conf.use_short_preamble))
tx_desc40->txdw5 |=
- cpu_to_le32(TXDESC_SHORT_PREAMBLE_8723B);
+ cpu_to_le32(TXDESC40_SHORT_PREAMBLE);
if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
/*
@@ -7347,11 +4895,9 @@ static void rtl8xxxu_tx(struct ieee80211_hw *hw,
*/
			tx_desc40->txdw4 |=
cpu_to_le32(DESC_RATE_24M <<
- TXDESC_RTS_RATE_SHIFT_8723B);
- tx_desc->txdw3 |=
- cpu_to_le32(TXDESC_RTS_CTS_ENABLE_8723B);
- tx_desc->txdw3 |=
- cpu_to_le32(TXDESC_HW_RTS_ENABLE_8723B);
+ TXDESC40_RTS_RATE_SHIFT);
+			tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_RTS_CTS_ENABLE);
+			tx_desc40->txdw3 |= cpu_to_le32(TXDESC40_HW_RTS_ENABLE);
}
}
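
Which of the two branches runs is decided once per chip: the fileops table records the descriptor size, and the TX path keys off it (the same test appears earlier for the thermal meter setup). A sketch of the dispatch, assuming nothing beyond the fields visible in this patch:

/*
 * Gen1 parts use the 32-byte TX descriptor, gen2 parts (8723bu,
 * 8192eu) the 40-byte one; fops->tx_desc_size is the discriminator.
 */
static bool uses_txdesc40(struct rtl8xxxu_priv *priv)
{
	return priv->fops->tx_desc_size == sizeof(struct rtl8xxxu_txdesc40);
}
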
@@ -7491,15 +5037,21 @@ static void rtl8xxxu_rx_urb_work(struct work_struct *work)
}
}
-static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status)
+int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
{
- struct rtl8xxxu_rx_desc *rx_desc = (struct rtl8xxxu_rx_desc *)skb->data;
+ struct rtl8xxxu_rxdesc16 *rx_desc =
+ (struct rtl8xxxu_rxdesc16 *)skb->data;
struct rtl8723au_phy_stats *phy_stats;
+ __le32 *_rx_desc_le = (__le32 *)skb->data;
+ u32 *_rx_desc = (u32 *)skb->data;
int drvinfo_sz, desc_shift;
+ int i;
+
+ for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc16) / sizeof(u32)); i++)
+ _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
- skb_pull(skb, sizeof(struct rtl8xxxu_rx_desc));
+ skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc16));
phy_stats = (struct rtl8723au_phy_stats *)skb->data;
@@ -7531,16 +5083,21 @@ static int rtl8723au_parse_rx_desc(struct rtl8xxxu_priv *priv,
return RX_TYPE_DATA_PKT;
}
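
Both parse functions now byteswap the whole descriptor in place before reading any of its bitfields, so the rest of the parser can treat it as native-endian. The pattern, generalized over the two descriptor sizes (4 words for rtl8xxxu_rxdesc16, 6 for rtl8xxxu_rxdesc24):

/*
 * Illustrative in-place le32 -> cpu conversion, as done at the top
 * of rtl8xxxu_parse_rxdesc16/24. desc_words is the descriptor size
 * in 32-bit words, i.e. sizeof(desc) / sizeof(u32).
 */
static void rxdesc_to_cpu_sketch(void *data, int desc_words)
{
	__le32 *src = data;
	u32 *dst = data;
	int i;

	for (i = 0; i < desc_words; i++)
		dst[i] = le32_to_cpu(src[i]);
}
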
-static int rtl8723bu_parse_rx_desc(struct rtl8xxxu_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_rx_status *rx_status)
+int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb,
+ struct ieee80211_rx_status *rx_status)
{
- struct rtl8723bu_rx_desc *rx_desc =
- (struct rtl8723bu_rx_desc *)skb->data;
+ struct rtl8xxxu_rxdesc24 *rx_desc =
+ (struct rtl8xxxu_rxdesc24 *)skb->data;
struct rtl8723au_phy_stats *phy_stats;
+ __le32 *_rx_desc_le = (__le32 *)skb->data;
+ u32 *_rx_desc = (u32 *)skb->data;
int drvinfo_sz, desc_shift;
+ int i;
- skb_pull(skb, sizeof(struct rtl8723bu_rx_desc));
+ for (i = 0; i < (sizeof(struct rtl8xxxu_rxdesc24) / sizeof(u32)); i++)
+ _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+
+ skb_pull(skb, sizeof(struct rtl8xxxu_rxdesc24));
phy_stats = (struct rtl8723au_phy_stats *)skb->data;
@@ -7632,12 +5189,7 @@ static void rtl8xxxu_rx_complete(struct urb *urb)
struct sk_buff *skb = (struct sk_buff *)urb->context;
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
struct device *dev = &priv->udev->dev;
- __le32 *_rx_desc_le = (__le32 *)skb->data;
- u32 *_rx_desc = (u32 *)skb->data;
- int rx_type, i;
-
- for (i = 0; i < (sizeof(struct rtl8xxxu_rx_desc) / sizeof(u32)); i++)
- _rx_desc[i] = le32_to_cpu(_rx_desc_le[i]);
+ int rx_type;
skb_put(skb, urb->actual_length);
@@ -7676,14 +5228,15 @@ static int rtl8xxxu_submit_rx_urb(struct rtl8xxxu_priv *priv,
{
struct sk_buff *skb;
int skb_size;
- int ret;
+ int ret, rx_desc_sz;
- skb_size = sizeof(struct rtl8xxxu_rx_desc) + RTL_RX_BUFFER_SIZE;
+ rx_desc_sz = priv->fops->rx_desc_size;
+ skb_size = rx_desc_sz + RTL_RX_BUFFER_SIZE;
skb = __netdev_alloc_skb(NULL, skb_size, GFP_KERNEL);
if (!skb)
return -ENOMEM;
- memset(skb->data, 0, sizeof(struct rtl8xxxu_rx_desc));
+ memset(skb->data, 0, rx_desc_sz);
usb_fill_bulk_urb(&rx_urb->urb, priv->udev, priv->pipe_in, skb->data,
skb_size, rtl8xxxu_rx_complete, skb);
usb_anchor_urb(&rx_urb->urb, &priv->rx_anchor);
@@ -7749,7 +5302,7 @@ static int rtl8xxxu_add_interface(struct ieee80211_hw *hw,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
- rtl8723a_stop_tx_beacon(priv);
+ rtl8xxxu_stop_tx_beacon(priv);
val8 = rtl8xxxu_read8(priv, REG_BEACON_CTRL);
val8 |= BEACON_ATIM | BEACON_FUNCTION_ENABLE |
@@ -8153,6 +5706,8 @@ static void rtl8xxxu_stop(struct ieee80211_hw *hw)
if (priv->usb_interrupts)
usb_kill_anchored_urbs(&priv->int_anchor);
+ rtl8xxxu_write8(priv, REG_TXPAUSE, 0xff);
+
priv->fops->disable_rf(priv);
/*
@@ -8285,6 +5840,10 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
if (id->idProduct == 0x7811)
untested = 0;
break;
+ case 0x050d:
+ if (id->idProduct == 0x1004)
+ untested = 0;
+ break;
default:
break;
}
@@ -8377,7 +5936,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
dev_info(&udev->dev, "Enabling HT_20_40 on the 2.4GHz band\n");
sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
}
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
hw->wiphy->rts_threshold = 2347;
@@ -8413,13 +5972,14 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface)
hw = usb_get_intfdata(interface);
priv = hw->priv;
- rtl8xxxu_disable_device(hw);
+ ieee80211_unregister_hw(hw);
+
+ priv->fops->power_off(priv);
+
usb_set_intfdata(interface, NULL);
dev_info(&priv->udev->dev, "disconnecting\n");
- ieee80211_unregister_hw(hw);
-
kfree(priv->fw_data);
mutex_destroy(&priv->usb_buf_mutex);
mutex_destroy(&priv->h2c_mutex);
@@ -8428,115 +5988,6 @@ static void rtl8xxxu_disconnect(struct usb_interface *interface)
ieee80211_free_hw(hw);
}
-static struct rtl8xxxu_fileops rtl8723au_fops = {
- .parse_efuse = rtl8723au_parse_efuse,
- .load_firmware = rtl8723au_load_firmware,
- .power_on = rtl8723au_power_on,
- .power_off = rtl8xxxu_power_off,
- .reset_8051 = rtl8xxxu_reset_8051,
- .llt_init = rtl8xxxu_init_llt_table,
- .phy_iq_calibrate = rtl8723au_phy_iq_calibrate,
- .config_channel = rtl8723au_config_channel,
- .parse_rx_desc = rtl8723au_parse_rx_desc,
- .enable_rf = rtl8723a_enable_rf,
- .disable_rf = rtl8723a_disable_rf,
- .set_tx_power = rtl8723a_set_tx_power,
- .update_rate_mask = rtl8723au_update_rate_mask,
- .report_connect = rtl8723au_report_connect,
- .writeN_block_size = 1024,
- .mbox_ext_reg = REG_HMBOX_EXT_0,
- .mbox_ext_width = 2,
- .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
- .adda_1t_init = 0x0b1b25a0,
- .adda_1t_path_on = 0x0bdb25a0,
- .adda_2t_path_on_a = 0x04db25a4,
- .adda_2t_path_on_b = 0x0b1b25a4,
-};
-
-static struct rtl8xxxu_fileops rtl8723bu_fops = {
- .parse_efuse = rtl8723bu_parse_efuse,
- .load_firmware = rtl8723bu_load_firmware,
- .power_on = rtl8723bu_power_on,
- .power_off = rtl8723bu_power_off,
- .reset_8051 = rtl8723bu_reset_8051,
- .llt_init = rtl8xxxu_auto_llt_table,
- .phy_init_antenna_selection = rtl8723bu_phy_init_antenna_selection,
- .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
- .config_channel = rtl8723bu_config_channel,
- .parse_rx_desc = rtl8723bu_parse_rx_desc,
- .init_aggregation = rtl8723bu_init_aggregation,
- .init_statistics = rtl8723bu_init_statistics,
- .enable_rf = rtl8723b_enable_rf,
- .disable_rf = rtl8723b_disable_rf,
- .set_tx_power = rtl8723b_set_tx_power,
- .update_rate_mask = rtl8723bu_update_rate_mask,
- .report_connect = rtl8723bu_report_connect,
- .writeN_block_size = 1024,
- .mbox_ext_reg = REG_HMBOX_EXT0_8723B,
- .mbox_ext_width = 4,
- .tx_desc_size = sizeof(struct rtl8723bu_tx_desc),
- .has_s0s1 = 1,
- .adda_1t_init = 0x01c00014,
- .adda_1t_path_on = 0x01c00014,
- .adda_2t_path_on_a = 0x01c00014,
- .adda_2t_path_on_b = 0x01c00014,
-};
-
-#ifdef CONFIG_RTL8XXXU_UNTESTED
-
-static struct rtl8xxxu_fileops rtl8192cu_fops = {
- .parse_efuse = rtl8192cu_parse_efuse,
- .load_firmware = rtl8192cu_load_firmware,
- .power_on = rtl8192cu_power_on,
- .power_off = rtl8xxxu_power_off,
- .reset_8051 = rtl8xxxu_reset_8051,
- .llt_init = rtl8xxxu_init_llt_table,
- .phy_iq_calibrate = rtl8723au_phy_iq_calibrate,
- .config_channel = rtl8723au_config_channel,
- .parse_rx_desc = rtl8723au_parse_rx_desc,
- .enable_rf = rtl8723a_enable_rf,
- .disable_rf = rtl8723a_disable_rf,
- .set_tx_power = rtl8723a_set_tx_power,
- .update_rate_mask = rtl8723au_update_rate_mask,
- .report_connect = rtl8723au_report_connect,
- .writeN_block_size = 128,
- .mbox_ext_reg = REG_HMBOX_EXT_0,
- .mbox_ext_width = 2,
- .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
- .adda_1t_init = 0x0b1b25a0,
- .adda_1t_path_on = 0x0bdb25a0,
- .adda_2t_path_on_a = 0x04db25a4,
- .adda_2t_path_on_b = 0x0b1b25a4,
-};
-
-#endif
-
-static struct rtl8xxxu_fileops rtl8192eu_fops = {
- .parse_efuse = rtl8192eu_parse_efuse,
- .load_firmware = rtl8192eu_load_firmware,
- .power_on = rtl8192eu_power_on,
- .power_off = rtl8xxxu_power_off,
- .reset_8051 = rtl8xxxu_reset_8051,
- .llt_init = rtl8xxxu_auto_llt_table,
- .phy_iq_calibrate = rtl8723bu_phy_iq_calibrate,
- .config_channel = rtl8723bu_config_channel,
- .parse_rx_desc = rtl8723bu_parse_rx_desc,
- .enable_rf = rtl8723b_enable_rf,
- .disable_rf = rtl8723b_disable_rf,
- .set_tx_power = rtl8723b_set_tx_power,
- .update_rate_mask = rtl8723au_update_rate_mask,
- .report_connect = rtl8723au_report_connect,
- .writeN_block_size = 128,
- .mbox_ext_reg = REG_HMBOX_EXT0_8723B,
- .mbox_ext_width = 4,
- .tx_desc_size = sizeof(struct rtl8723au_tx_desc),
- .has_s0s1 = 1,
- .adda_1t_init = 0x0fc01616,
- .adda_1t_path_on = 0x0fc01616,
- .adda_2t_path_on_a = 0x0fc01616,
- .adda_2t_path_on_b = 0x0fc01616,
-};
-
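
All of the fileops tables above migrate out of this file into the per-chip sources; the mechanism itself is unchanged. Each usb_device_id entry carries a pointer to its chip's ops table in driver_info, and probe binds it once so the rest of the driver stays chip-agnostic. A minimal sketch of that binding (illustrative; the real probe does much more):

/*
 * Sketch of the driver_info binding used by dev_table below; the
 * cast mirrors the (unsigned long)&rtl8xxxu_fileops casts in the
 * table entries.
 */
static void bind_fops_sketch(struct rtl8xxxu_priv *priv,
			     const struct usb_device_id *id)
{
	priv->fops = (struct rtl8xxxu_fileops *)id->driver_info;
}
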
static struct usb_device_id dev_table[] = {
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8724, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8723au_fops},
@@ -8559,6 +6010,9 @@ static struct usb_device_id dev_table[] = {
/* Tested by Larry Finger */
{USB_DEVICE_AND_INTERFACE_INFO(0x7392, 0x7811, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
+/* Tested by Andrea Merello */
+{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
+ .driver_info = (unsigned long)&rtl8192cu_fops},
/* Currently untested 8188 series devices */
{USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_REALTEK, 0x8191, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
@@ -8643,8 +6097,6 @@ static struct usb_device_id dev_table[] = {
/* Currently untested 8192 series devices */
{USB_DEVICE_AND_INTERFACE_INFO(0x04bb, 0x0950, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
-{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x1004, 0xff, 0xff, 0xff),
- .driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2102, 0xff, 0xff, 0xff),
.driver_info = (unsigned long)&rtl8192cu_fops},
{USB_DEVICE_AND_INTERFACE_INFO(0x050d, 0x2103, 0xff, 0xff, 0xff),
@@ -8700,6 +6152,7 @@ static struct usb_driver rtl8xxxu_driver = {
.probe = rtl8xxxu_probe,
.disconnect = rtl8xxxu_disconnect,
.id_table = dev_table,
+ .no_dynamic_id = 1,
.disable_hub_initiated_lpm = 1,
};
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index e545e849f..b0e0c6423 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 - 2015 Jes Sorensen <Jes.Sorensen@redhat.com>
+ * Copyright (c) 2014 - 2016 Jes Sorensen <Jes.Sorensen@redhat.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -109,6 +109,9 @@
#define AFE_XTAL_GATE_DIG BIT(17)
#define AFE_XTAL_BT_GATE BIT(20)
+/*
+ * 0x0028 is also known as REG_AFE_CTRL2 on 8723bu/8192eu
+ */
#define REG_AFE_PLL_CTRL 0x0028
#define AFE_PLL_ENABLE BIT(0)
#define AFE_PLL_320_ENABLE BIT(1)
@@ -192,6 +195,7 @@
control */
#define MULTI_GPS_FUNC_EN BIT(22) /* GPS function enable */
+#define REG_AFE_CTRL4 0x0078 /* 8192eu/8723bu */
#define REG_LDO_SW_CTRL 0x007c /* 8192eu */
#define REG_MCU_FW_DL 0x0080
@@ -383,7 +387,7 @@
#define REG_RQPN 0x0200
#define RQPN_HI_PQ_SHIFT 0
#define RQPN_LO_PQ_SHIFT 8
-#define RQPN_NORM_PQ_SHIFT 16
+#define RQPN_PUB_PQ_SHIFT 16
#define RQPN_LOAD BIT(31)
#define REG_FIFOPAGE 0x0204
@@ -417,13 +421,20 @@
/* spec version 11 */
/* 0x0400 ~ 0x047F Protocol Configuration */
-#define REG_VOQ_INFORMATION 0x0400
-#define REG_VIQ_INFORMATION 0x0404
-#define REG_BEQ_INFORMATION 0x0408
-#define REG_BKQ_INFORMATION 0x040c
-#define REG_MGQ_INFORMATION 0x0410
-#define REG_HGQ_INFORMATION 0x0414
-#define REG_BCNQ_INFORMATION 0x0418
+/* 8192c, 8192d */
+#define REG_VOQ_INFO 0x0400
+#define REG_VIQ_INFO 0x0404
+#define REG_BEQ_INFO 0x0408
+#define REG_BKQ_INFO 0x040c
+/* 8188e, 8723a, 8812a, 8821a, 8192e, 8723b */
+#define REG_Q0_INFO 0x400
+#define REG_Q1_INFO 0x404
+#define REG_Q2_INFO 0x408
+#define REG_Q3_INFO 0x40c
+
+#define REG_MGQ_INFO 0x0410
+#define REG_HGQ_INFO 0x0414
+#define REG_BCNQ_INFO 0x0418
#define REG_CPU_MGQ_INFORMATION 0x041c
#define REG_FWHW_TXQ_CTRL 0x0420
@@ -494,6 +505,9 @@
#define REG_DATA_SUBCHANNEL 0x0483
/* 8723au */
#define REG_INIDATA_RATE_SEL 0x0484
+/* MACID_SLEEP_1/3 for 8723b, 8192e, 8812a, 8821a */
+#define REG_MACID_SLEEP_3_8723B		0x0484
+#define REG_MACID_SLEEP_1_8723B		0x0488
#define REG_POWER_STATUS 0x04a4
#define REG_POWER_STAGE1 0x04b4
@@ -502,12 +516,20 @@
#define REG_PKT_VO_VI_LIFE_TIME 0x04c0
#define REG_PKT_BE_BK_LIFE_TIME 0x04c2
#define REG_STBC_SETTING 0x04c4
+#define REG_QUEUE_CTRL 0x04c6
#define REG_HT_SINGLE_AMPDU_8723B 0x04c7
#define REG_PROT_MODE_CTRL 0x04c8
#define REG_MAX_AGGR_NUM 0x04ca
#define REG_RTS_MAX_AGGR_NUM 0x04cb
#define REG_BAR_MODE_CTRL 0x04cc
#define REG_RA_TRY_RATE_AGG_LMT 0x04cf
+/* MACID_DROP for 8723a */
+#define REG_MACID_DROP_8723A		0x04d0
+/* EARLY_MODE_CONTROL 8188e */
+#define REG_EARLY_MODE_CONTROL_8188E 0x04d0
+/* MACID_SLEEP_2 for 8723b, 8192e, 8812a, 8821a */
+#define REG_MACID_SLEEP_2_8723B		0x04d0
+#define REG_MACID_SLEEP 0x04d4
#define REG_NQOS_SEQ 0x04dc
#define REG_QOS_SEQ 0x04de
#define REG_NEED_CPU_HANDLE 0x04e0
@@ -860,6 +882,10 @@
#define CCK0_SIDEBAND BIT(4)
#define REG_CCK0_AFE_SETTING 0x0a04
+#define CCK0_AFE_RX_MASK 0x0f000000
+#define CCK0_AFE_RX_ANT_AB BIT(24)
+#define CCK0_AFE_RX_ANT_A 0
+#define CCK0_AFE_RX_ANT_B (BIT(24) | BIT(26))
#define REG_CONFIG_ANT_A 0x0b68
#define REG_CONFIG_ANT_B 0x0b6c
@@ -1026,6 +1052,7 @@
#define USB_HIMR_ROK BIT(0) /* Receive DMA OK Interrupt */
#define REG_USB_SPECIAL_OPTION 0xfe55
+#define REG_USB_HRPWM 0xfe58
#define REG_USB_DMA_AGG_TO 0xfe5b
#define REG_USB_AGG_TO 0xfe5c
#define REG_USB_AGG_TH 0xfe5d
@@ -1111,6 +1138,7 @@
#define RF6052_REG_T_METER_8723B 0x42
#define RF6052_REG_UNKNOWN_43 0x43
#define RF6052_REG_UNKNOWN_55 0x55
+#define RF6052_REG_UNKNOWN_56 0x56
#define RF6052_REG_S0S1 0xb0
#define RF6052_REG_UNKNOWN_DF 0xdf
#define RF6052_REG_UNKNOWN_ED 0xed
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 7a40d8dff..264466f59 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -131,7 +131,7 @@ static struct ieee80211_rate rtl_ratetable_5g[] = {
};
static const struct ieee80211_supported_band rtl_band_2ghz = {
- .band = IEEE80211_BAND_2GHZ,
+ .band = NL80211_BAND_2GHZ,
.channels = rtl_channeltable_2g,
.n_channels = ARRAY_SIZE(rtl_channeltable_2g),
@@ -143,7 +143,7 @@ static const struct ieee80211_supported_band rtl_band_2ghz = {
};
static struct ieee80211_supported_band rtl_band_5ghz = {
- .band = IEEE80211_BAND_5GHZ,
+ .band = NL80211_BAND_5GHZ,
.channels = rtl_channeltable_5g,
.n_channels = ARRAY_SIZE(rtl_channeltable_5g),
@@ -197,7 +197,7 @@ static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- /*hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ /*hw->wiphy->bands[NL80211_BAND_2GHZ]
	 *based on ant_num
*rx_mask: RX mask
*if rx_ant = 1 rx_mask[0]= 0xff;==>MCS0-MCS7
@@ -328,26 +328,26 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
rtlhal->bandset == BAND_ON_BOTH) {
/* 1: 2.4 G bands */
/* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+ sband = &(rtlmac->bands[NL80211_BAND_2GHZ]);
- /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
* to default value(1T1R) */
- memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
+ memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]), &rtl_band_2ghz,
sizeof(struct ieee80211_supported_band));
/* <3> init ht cap base on ant_num */
_rtl_init_hw_ht_capab(hw, &sband->ht_cap);
/* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
/* 2: 5 G bands */
/* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+ sband = &(rtlmac->bands[NL80211_BAND_5GHZ]);
- /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+ /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
* to default value(1T1R) */
- memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]), &rtl_band_5ghz,
+ memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]), &rtl_band_5ghz,
sizeof(struct ieee80211_supported_band));
/* <3> init ht cap base on ant_num */
@@ -355,15 +355,15 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
_rtl_init_hw_vht_capab(hw, &sband->vht_cap);
/* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
} else {
if (rtlhal->current_bandtype == BAND_ON_2_4G) {
/* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
+ sband = &(rtlmac->bands[NL80211_BAND_2GHZ]);
- /* <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
+ /* <2> set hw->wiphy->bands[NL80211_BAND_2GHZ]
* to default value(1T1R) */
- memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]),
+ memcpy(&(rtlmac->bands[NL80211_BAND_2GHZ]),
&rtl_band_2ghz,
sizeof(struct ieee80211_supported_band));
@@ -371,14 +371,14 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
_rtl_init_hw_ht_capab(hw, &sband->ht_cap);
/* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
} else if (rtlhal->current_bandtype == BAND_ON_5G) {
/* <1> use mac->bands as mem for hw->wiphy->bands */
- sband = &(rtlmac->bands[IEEE80211_BAND_5GHZ]);
+ sband = &(rtlmac->bands[NL80211_BAND_5GHZ]);
- /* <2> set hw->wiphy->bands[IEEE80211_BAND_5GHZ]
+ /* <2> set hw->wiphy->bands[NL80211_BAND_5GHZ]
* to default value(1T1R) */
- memcpy(&(rtlmac->bands[IEEE80211_BAND_5GHZ]),
+ memcpy(&(rtlmac->bands[NL80211_BAND_5GHZ]),
&rtl_band_5ghz,
sizeof(struct ieee80211_supported_band));
@@ -387,7 +387,7 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
_rtl_init_hw_vht_capab(hw, &sband->vht_cap);
/* <4> set mac->sband to wiphy->sband */
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = sband;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
} else {
RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Err BAND %d\n",
rtlhal->current_bandtype);
@@ -861,7 +861,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
/* mac80211's rate_idx is like this:
*
- * 2.4G band:rx_status->band == IEEE80211_BAND_2GHZ
+ * 2.4G band:rx_status->band == NL80211_BAND_2GHZ
*
* B/G rate:
* (rx_status->flag & RX_FLAG_HT) = 0,
@@ -871,7 +871,7 @@ static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw,
* (rx_status->flag & RX_FLAG_HT) = 1,
* DESC_RATEMCS0-->DESC_RATEMCS15 ==> idx is 0-->15
*
- * 5G band:rx_status->band == IEEE80211_BAND_5GHZ
+ * 5G band:rx_status->band == NL80211_BAND_5GHZ
* A rate:
* (rx_status->flag & RX_FLAG_HT) = 0,
* DESC_RATE6M-->DESC_RATE54M ==> idx is 0-->7,
@@ -958,7 +958,7 @@ int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht, bool isvht,
return rate_idx;
}
if (false == isht) {
- if (IEEE80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
+ if (NL80211_BAND_2GHZ == hw->conf.chandef.chan->band) {
switch (desc_rate) {
case DESC_RATE1M:
rate_idx = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
index 451456835..a30af6cc2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8192e2ant.c
@@ -70,83 +70,83 @@ static u8 halbtc8192e2ant_btrssi_state(u8 level_num, u8 rssi_thresh,
if (level_num == 2) {
if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = LOW\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state = LOW\n");
if (btrssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
btrssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to High\n");
} else {
btrssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Low\n");
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = HIGH\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state = HIGH\n");
if (btrssi < rssi_thresh) {
btrssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Low\n");
} else {
btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi thresh error!!\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
if ((coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_LOW) ||
(coex_sta->pre_bt_rssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = LOW\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state = LOW\n");
if (btrssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
btrssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Medium\n");
} else {
btrssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
(coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_STAY_MEDIUM)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi pre state = MEDIUM\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi pre state = MEDIUM\n");
if (btrssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
btrssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to High\n");
} else if (btrssi < rssi_thresh) {
btrssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Low\n");
} else {
btrssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at Medium\n");
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi pre state = HIGH\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi pre state = HIGH\n");
if (btrssi < rssi_thresh1) {
btrssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state switch to Medium\n");
} else {
btrssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "BT Rssi state stay at High\n");
}
}
}
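
The BTC_PRINT(BTC_MSG_ALGORITHM, ...) calls collapse into btc_alg_dbg(), dropping the always-identical message-class argument. Its definition is not part of this hunk; a plausible shape for such a wrapper, offered only as an assumption about how the btcoexist headers might implement it:

/*
 * Hypothetical sketch of a btc_alg_dbg()-style macro; the actual
 * definition lives in the rtlwifi btcoexist headers and may differ.
 * The format strings above already carry their own "\n".
 */
#define alg_dbg_sketch(dbgflag, fmt, ...)				\
	do {								\
		if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & (dbgflag))) \
			printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
	} while (0)
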
@@ -173,32 +173,28 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
if (wifirssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
wifirssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to High\n");
} else {
wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Low\n");
}
} else {
if (wifirssi < rssi_thresh) {
wifirssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Low\n");
} else {
wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
- "wifi RSSI thresh error!!\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -209,14 +205,12 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
if (wifirssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
wifirssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Medium\n");
} else {
wifirssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -225,31 +219,26 @@ static u8 halbtc8192e2ant_wifirssi_state(struct btc_coexist *btcoexist,
if (wifirssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8192E_2ANT)) {
wifirssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to High\n");
} else if (wifirssi < rssi_thresh) {
wifirssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Low\n");
} else {
wifirssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at Medium\n");
}
} else {
if (wifirssi < rssi_thresh1) {
wifirssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state switch to Medium\n");
} else {
wifirssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "wifi RSSI state stay at High\n");
}
}
}
@@ -284,26 +273,26 @@ static void btc8192e2ant_monitor_bt_enable_dis(struct btc_coexist *btcoexist)
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is enabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is disabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
}
}
if (pre_bt_disabled != bt_disabled) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
}
}
@@ -499,12 +488,12 @@ static void halbtc8192e2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex] High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex] Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -518,9 +507,9 @@ static void halbtc8192e2ant_querybt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -592,8 +581,8 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hson);
if (!bt_link_info->bt_link_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "No BT link exists!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "No BT link exists!!!\n");
return algorithm;
}
@@ -608,27 +597,27 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (numdiffprofile == 1) {
if (bt_link_info->sco_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "A2DP only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "A2DP only\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "PAN(HS) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "PAN(HS) only\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "PAN(EDR) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "PAN(EDR) only\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR;
}
@@ -637,21 +626,21 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (numdiffprofile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + HID\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + A2DP ==> SCO\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + A2DP ==> SCO\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
}
@@ -660,38 +649,38 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
if (stack_info->num_of_hid >= 2) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID*2 + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID*2 + A2DP\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID + A2DP\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
}
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "A2DP + PAN(HS)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -701,30 +690,30 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + HID + A2DP ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + HID + A2DP ==> HID\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + HID + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + HID + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_SCO_PAN;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + A2DP + PAN(HS)\n");
algorithm = BT_8192E_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO + A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO + A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -734,13 +723,13 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID + A2DP + PAN(HS)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "HID + A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -752,12 +741,12 @@ static u8 halbtc8192e2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hson) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "ErrorSCO+HID+A2DP+PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "ErrorSCO+HID+A2DP+PAN(HS)\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "SCO+HID+A2DP+PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "SCO+HID+A2DP+PAN(EDR)\n");
algorithm =
BT_8192E_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -778,10 +767,10 @@ static void halbtc8192e2ant_setfw_dac_swinglevel(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swinglvl;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swinglvl);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -793,9 +782,9 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
h2c_parameter[0] = dec_btpwr_lvl;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
- dec_btpwr_lvl, h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex] decrease Bt Power level = %d, FW write 0x62 = 0x%x\n",
+ dec_btpwr_lvl, h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -803,15 +792,15 @@ static void halbtc8192e2ant_set_fwdec_btpwr(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_dec_btpwr(struct btc_coexist *btcoexist,
bool force_exec, u8 dec_btpwr_lvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s Dec BT power level = %d\n",
- (force_exec ? "force to" : ""), dec_btpwr_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power level = %d\n",
+ (force_exec ? "force to" : ""), dec_btpwr_lvl);
coex_dm->cur_dec_bt_pwr = dec_btpwr_lvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preBtDecPwrLvl=%d, curBtDecPwrLvl=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
}
halbtc8192e2ant_set_fwdec_btpwr(btcoexist, coex_dm->cur_dec_bt_pwr);
@@ -828,10 +817,10 @@ static void halbtc8192e2ant_set_bt_autoreport(struct btc_coexist *btcoexist,
if (enable_autoreport)
h2c_parameter[0] |= BIT0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_autoreport ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_autoreport ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
}
@@ -840,17 +829,17 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
bool force_exec,
bool enable_autoreport)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""),
- ((enable_autoreport) ? "Enabled" : "Disabled"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""),
+ ((enable_autoreport) ? "Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_autoreport;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex] bPreBtAutoReport=%d, bCurBtAutoReport=%d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
return;
@@ -864,16 +853,16 @@ static void halbtc8192e2ant_bt_autoreport(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_fw_dac_swinglvl(struct btc_coexist *btcoexist,
bool force_exec, u8 fw_dac_swinglvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swinglvl);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swinglvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swinglvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex] preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -891,8 +880,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
{
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
@@ -900,8 +889,8 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
* After initialization, we can use coex_dm->btRf0x1eBackup
*/
if (btcoexist->initilized) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -912,17 +901,17 @@ static void btc8192e2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_rf_shrink(struct btc_coexist *btcoexist,
bool force_exec, bool rx_rf_shrink_on)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""),
- ((rx_rf_shrink_on) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""),
+ ((rx_rf_shrink_on) ? "ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex]bPreRfRxLpfShrink=%d,bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
if (coex_dm->pre_rf_rx_lpf_shrink ==
coex_dm->cur_rf_rx_lpf_shrink)
@@ -939,8 +928,8 @@ static void halbtc8192e2ant_set_dac_swingreg(struct btc_coexist *btcoexist,
{
u8 val = (u8)level;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
@@ -958,22 +947,22 @@ static void halbtc8192e2ant_DacSwing(struct btc_coexist *btcoexist,
bool force_exec, bool dac_swingon,
u32 dac_swinglvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
- (force_exec ? "force to" : ""),
- ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swinglvl = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ ((dac_swingon) ? "ON" : "OFF"), dac_swinglvl);
coex_dm->cur_dac_swing_on = dac_swingon;
coex_dm->cur_dac_swing_lvl = dac_swinglvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl = 0x%x, ",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "bCurDacSwingOn=%d, curDacSwingLvl = 0x%x\n",
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -991,8 +980,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
{
/* BB AGC Gain Table */
if (agc_table_en) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table On!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table On!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x0a1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x091B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x081C0001);
@@ -1000,8 +989,8 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x061E0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x051F0001);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table Off!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table Off!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -1014,16 +1003,17 @@ static void halbtc8192e2ant_set_agc_table(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_AgcTable(struct btc_coexist *btcoexist,
bool force_exec, bool agc_table_en)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s %s Agc Table\n",
- (force_exec ? "force to" : ""),
- ((agc_table_en) ? "Enable" : "Disable"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ ((agc_table_en) ? "Enable" : "Disable"));
coex_dm->cur_agc_table_en = agc_table_en;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
- coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en,
+ coex_dm->cur_agc_table_en);
if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
return;
@@ -1037,20 +1027,20 @@ static void halbtc8192e2ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -1059,30 +1049,30 @@ static void halbtc8192e2ant_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
- (force_exec ? "force to" : ""), val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- val0x6c4, val0x6c8, val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, ",
+ (force_exec ? "force to" : ""), val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
- coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
- coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x,\n",
- coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
- coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], preVal0x6c0 = 0x%x, preVal0x6c4 = 0x%x, ",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "preVal0x6c8 = 0x%x, preVal0x6cc = 0x%x !!\n",
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], curVal0x6c0 = 0x%x, curVal0x6c4 = 0x%x\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "curVal0x6c8 = 0x%x, curVal0x6cc = 0x%x !!\n",
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1136,9 +1126,9 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex]set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -1146,18 +1136,18 @@ static void halbtc8192e2ant_set_fw_ignore_wlanact(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_IgnoreWlanAct(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreIgnoreWlanAct = %d ",
- coex_dm->pre_ignore_wlan_act);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "bCurIgnoreWlanAct = %d!!\n",
- coex_dm->cur_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d ",
+ coex_dm->pre_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1185,11 +1175,11 @@ static void halbtc8192e2ant_SetFwPstdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 | h2c_parameter[4]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1213,20 +1203,20 @@ static void btc8192e2ant_sw_mec2(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_ps_tdma(struct btc_coexist *btcoexist,
bool force_exec, bool turn_on, u8 type)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn %s PS TDMA, type=%d\n",
- (force_exec ? "force to" : ""),
- (turn_on ? "ON" : "OFF"), type);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1353,8 +1343,8 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
u8 mimops = BTC_MIMO_PS_DYNAMIC;
u32 disra_mask = 0x0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], REAL set SS Type = %d\n", sstype);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], REAL set SS Type = %d\n", sstype);
disra_mask = halbtc8192e2ant_decidera_mask(btcoexist, sstype,
coex_dm->curra_masktype);
@@ -1386,9 +1376,9 @@ static void halbtc8192e2ant_set_switch_sstype(struct btc_coexist *btcoexist,
static void halbtc8192e2ant_switch_sstype(struct btc_coexist *btcoexist,
bool force_exec, u8 new_sstype)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], %s Switch SS Type = %d\n",
- (force_exec ? "force to" : ""), new_sstype);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], %s Switch SS Type = %d\n",
+ (force_exec ? "force to" : ""), new_sstype);
coex_dm->cur_sstype = new_sstype;
if (!force_exec) {
@@ -1469,8 +1459,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non-connected idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non-connected idle!!\n");
if ((BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) ||
@@ -1506,8 +1496,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Wifi connected + BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Wifi connected + BT non connected-idle!!\n");
halbtc8192e2ant_switch_sstype(btcoexist,
NORMAL_EXEC, 2);
@@ -1534,8 +1524,8 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hson)
return false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Wifi connected + BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Wifi connected + BT connected-idle!!\n");
halbtc8192e2ant_switch_sstype(btcoexist,
NORMAL_EXEC, 2);
@@ -1560,12 +1550,12 @@ static bool halbtc8192e2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Wifi Connected-Busy + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Wifi Connected-Idle + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Wifi Connected-Idle + BT Busy!!\n");
halbtc8192e2ant_switch_sstype(btcoexist,
NORMAL_EXEC, 1);
@@ -1592,9 +1582,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 71) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1689,9 +1678,8 @@ static void btc8192e_int1(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 71);
@@ -1795,9 +1783,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 6);
@@ -1886,9 +1873,8 @@ static void btc8192e_int2(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 2);
@@ -1983,9 +1969,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 7);
@@ -2074,9 +2059,8 @@ static void btc8192e_int3(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8192e2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 3);
@@ -2178,13 +2162,13 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
int result;
u8 retry_cnt = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjust()\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -2288,11 +2272,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_cnt = coex_sta->bt_retry_cnt;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], retry_cnt = %d\n", retry_cnt);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
- up, dn, m, n, wait_cnt);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retry_cnt = %d\n", retry_cnt);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_cnt=%d\n",
+ up, dn, m, n, wait_cnt);
result = 0;
wait_cnt++;
/* no retry in the last 2-second duration */
@@ -2309,9 +2293,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex]Increase wifi duration!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex]Increase wifi duration!!\n");
}
} else if (retry_cnt <= 3) {
up--;
@@ -2334,9 +2317,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_cnt = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "Reduce wifi duration for retry<3\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "Reduce wifi duration for retry<3\n");
}
} else {
if (wait_cnt == 1)
@@ -2352,12 +2334,12 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_cnt = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "Decrease wifi duration for retryCounter>3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "Decrease wifi duration for retryCounter>3!!\n");
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], max Interval = %d\n", max_interval);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1)
btc8192e_int1(btcoexist, tx_pause, result);
else if (max_interval == 2)
@@ -2373,11 +2355,11 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], PsTdma type dismatch!!!, ");
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "curPsTdma=%d, recordPsTdma=%d\n",
- coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], PsTdma type dismatch!!!, ");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2388,9 +2370,8 @@ static void halbtc8192e2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
true,
coex_dm->tdma_adj_type);
else
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
@@ -2594,8 +2575,8 @@ static void halbtc8192e2ant_action_a2dp(struct btc_coexist *btcoexist)
btrssi_state == BTC_RSSI_STATE_STAY_LOW) &&
(wifirssi_state == BTC_RSSI_STATE_LOW ||
wifirssi_state == BTC_RSSI_STATE_STAY_LOW)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2dp, wifi/bt rssi both LOW!!\n");
long_dist = true;
}
if (long_dist) {
@@ -3100,105 +3081,105 @@ static void halbtc8192e2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
u8 algorithm = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], return for Manual CTRL <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], return for Manual CTRL <===\n");
return;
}
if (coex_sta->under_ips) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = halbtc8192e2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8192E_2ANT_COEX_ALGO_PANHS != algorithm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
halbtc8192e2ant_action_bt_inquiry(btcoexist);
return;
}
coex_dm->cur_algorithm = algorithm;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
if (halbtc8192e2ant_is_common_action(btcoexist)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant common.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
- coex_dm->pre_algorithm,
- coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex] preAlgorithm=%d, curAlgorithm=%d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8192E_2ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = SCO.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = SCO\n");
halbtc8192e2ant_action_sco(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_SCO_PAN:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = SCO+PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = SCO+PAN(EDR)\n");
halbtc8192e2ant_action_sco_pan(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID\n");
halbtc8192e2ant_action_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = A2DP\n");
halbtc8192e2ant_action_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
halbtc8192e2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN(EDR)\n");
halbtc8192e2ant_action_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = HS mode.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = HS mode\n");
halbtc8192e2ant_action_pan_hs(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = PAN+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN+A2DP\n");
halbtc8192e2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = PAN(EDR)+HID\n");
halbtc8192e2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8192e2ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8192E_2ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = HID+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = HID+A2DP\n");
halbtc8192e2ant_action_hid_a2dp(btcoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "Action 2-Ant, algorithm = unknown!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "Action 2-Ant, algorithm = unknown!!\n");
/* halbtc8192e2ant_coex_alloff(btcoexist); */
break;
}
@@ -3212,8 +3193,8 @@ static void halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist,
u16 u16tmp = 0;
u8 u8tmp = 0;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
if (backup) {
/* backup rf 0x1e value */
@@ -3296,8 +3277,8 @@ void ex_halbtc8192e2ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_halbtc8192e2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
halbtc8192e2ant_init_coex_dm(btcoexist);
}
@@ -3525,13 +3506,13 @@ void ex_halbtc8192e2ant_display_coex_info(struct btc_coexist *btcoexist)
void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8192e2ant_coex_alloff(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
}
}
@@ -3539,12 +3520,12 @@ void ex_halbtc8192e2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -3552,21 +3533,21 @@ void ex_halbtc8192e2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8192e2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_SCAN_START == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
else if (BTC_SCAN_FINISH == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
}
void ex_halbtc8192e2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_ASSOCIATE_START == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
else if (BTC_ASSOCIATE_FINISH == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
}
void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3582,11 +3563,11 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only for 2.4G do we need to inform bt of the chnl mask */
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -3606,10 +3587,10 @@ void ex_halbtc8192e2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3618,8 +3599,8 @@ void ex_halbtc8192e2ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type)
{
if (type == BTC_PACKET_DHCP)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], DHCP Packet notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
}
void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3637,19 +3618,19 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8192E_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length=%d, hex data = [",
- rsp_source, length);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
}
if (BT_INFO_SRC_8192E_2ANT_WIFI_FW != rsp_source) {
@@ -3666,8 +3647,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
* because bt is reset and the info is lost.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "bit1, send wifi BW&Chnl to BT!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "bit1, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -3683,8 +3664,8 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT3)) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "bit3, BT NOT ignore Wlan active!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "bit3, BT NOT ignore Wlan active!\n");
halbtc8192e2ant_IgnoreWlanAct(btcoexist,
FORCE_EXEC,
false);
@@ -3742,25 +3723,25 @@ void ex_halbtc8192e2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info&BT_INFO_8192E_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Non-Connected idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8192E_2ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info&BT_INFO_8192E_2ANT_B_SCO_ESCO) ||
(bt_info&BT_INFO_8192E_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_SCO_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8192E_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_ACL_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], bt_infoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8192E_2ANT_BT_STATUS_MAX;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex]bt_infoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8192E_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3788,7 +3769,7 @@ void ex_halbtc8192e2ant_stack_operation_notify(struct btc_coexist *btcoexist,
void ex_halbtc8192e2ant_halt_notify(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+ btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
halbtc8192e2ant_IgnoreWlanAct(btcoexist, FORCE_EXEC, true);
ex_halbtc8192e2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3801,29 +3782,29 @@ void ex_halbtc8192e2ant_periodical(struct btc_coexist *btcoexist)
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "=======================Periodical=======================\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "=======================Periodical=======================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "************************************************\n");
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ btc_iface_dbg(INTF_INIT,
+ "************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ btc_iface_dbg(INTF_INIT,
+ "BT stack/ hci ext ver = %s / %d\n",
+ ((stack_info->profile_notified) ? "Yes" : "No"),
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8192e_2ant, glcoex_ver_8192e_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ btc_iface_dbg(INTF_INIT,
+ "************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8192E_2ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
index 7e239d3ce..16add42a6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b1ant.c
@@ -74,28 +74,28 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -104,12 +104,12 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -118,26 +118,26 @@ static u8 halbtc8723b1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -165,32 +165,28 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -201,14 +197,12 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -217,31 +211,26 @@ static u8 halbtc8723b1ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_1ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -435,9 +424,9 @@ static void halbtc8723b1ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -532,8 +521,8 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], No BT link exists!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -548,27 +537,27 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
if (numdiffprofile == 1) {
if (bt_link_info->sco_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO only\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID only\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP only\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(HS) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = PAN(HS) only\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(EDR) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = PAN(EDR) only\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR;
}
@@ -577,21 +566,21 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (numdiffprofile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + PAN(HS)\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
@@ -599,32 +588,32 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -634,31 +623,31 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
algorithm = BT_8723B_1ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
@@ -668,13 +657,13 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -686,11 +675,11 @@ static u8 halbtc8723b1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm =
BT_8723B_1ANT_COEX_ALGO_PANEDR_HID;
}
@@ -717,9 +706,9 @@ static void btc8723b1ant_set_sw_pen_tx_rate_adapt(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36 */
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -743,20 +732,20 @@ static void halbtc8723b1ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -765,10 +754,10 @@ static void halbtc8723b1ant_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c4, u32 val0x6c8,
u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""),
- val0x6c0, val0x6c4, val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
@@ -839,9 +828,9 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -849,16 +838,16 @@ static void halbtc8723b1ant_SetFwIgnoreWlanAct(struct btc_coexist *btcoexist,
static void halbtc8723b1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -882,8 +871,8 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
if (ap_enable) {
if ((byte1 & BIT4) && !(byte1 & BIT5)) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], FW for 1Ant AP mode\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], FW for 1Ant AP mode\n");
real_byte1 &= ~BIT4;
real_byte1 |= BIT5;
@@ -904,13 +893,13 @@ static void halbtc8723b1ant_set_fw_ps_tdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = real_byte5;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 |
- h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 |
- h2c_parameter[4]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -929,22 +918,22 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
bool force_exec,
u8 lps_val, u8 rpwm_val)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
coex_dm->cur_lps = lps_val;
coex_dm->cur_rpwm = rpwm_val;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
- coex_dm->cur_lps, coex_dm->cur_rpwm);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RxBeaconMode = 0x%x , LPS-RPWM = 0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
(coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
- coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RPWM_Last = 0x%x , LPS-RPWM_Now = 0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
return;
}
@@ -958,8 +947,8 @@ static void halbtc8723b1ant_LpsRpwm(struct btc_coexist *btcoexist,
static void halbtc8723b1ant_sw_mechanism(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
halbtc8723b1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
@@ -1174,13 +1163,13 @@ static void halbtc8723b1ant_ps_tdma(struct btc_coexist *btcoexist,
if (!force_exec) {
if (coex_dm->cur_ps_tdma_on)
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ******** TDMA(on, %d) *********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ******** TDMA(on, %d) *********\n",
+ coex_dm->cur_ps_tdma);
else
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ******** TDMA(off, %d) ********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ******** TDMA(off, %d) ********\n",
+ coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1394,45 +1383,45 @@ static bool halbtc8723b1ant_is_common_action(struct btc_coexist *btcoexist)
if (!wifi_connected &&
BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE == coex_dm->bt_status) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (!wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else if (!wifi_connected &&
(BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE !=
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- ("[BTCoex], Wifi non connected-idle + BT Busy!!\n"));
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
halbtc8723b1ant_sw_mechanism(btcoexist, false);
commom = true;
} else {
if (wifi_busy)
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
else
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
commom = false;
}
@@ -1451,8 +1440,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
u8 retry_count = 0, bt_info_ext;
bool wifi_busy = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjustForAcl()\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
if (BT_8723B_1ANT_WIFI_STATUS_CONNECTED_BUSY == wifi_status)
wifi_busy = true;
@@ -1481,8 +1470,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
halbtc8723b1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->tdma_adj_type = 2;
@@ -1513,9 +1502,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi duration!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
up--;
@@ -1538,9 +1526,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
if (wait_count == 1)
@@ -1556,8 +1543,8 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
if (result == -1) {
@@ -1602,9 +1589,9 @@ static void btc8723b1ant_tdma_dur_adj_for_acl(struct btc_coexist *btcoexist,
}
} else { /*no change */
/*if busy / idle change */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex],********* TDMA(on, %d) ********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex],********* TDMA(on, %d) ********\n",
+ coex_dm->cur_ps_tdma);
}
if (coex_dm->cur_ps_tdma != 1 && coex_dm->cur_ps_tdma != 2 &&
@@ -2010,15 +1997,15 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
bool scan = false, link = false, roam = false;
bool under_4way = false, ap_enable = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect()===>\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_4_WAY_PROGRESS,
&under_4way);
if (under_4way) {
halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
return;
}
@@ -2032,8 +2019,8 @@ static void halbtc8723b1ant_action_wifi_connected(struct btc_coexist *btcoexist)
else
halbtc8723b1ant_action_wifi_connected_special_packet(
btcoexist);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
@@ -2102,58 +2089,58 @@ static void btc8723b1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
if (!halbtc8723b1ant_is_common_action(btcoexist)) {
switch (coex_dm->cur_algorithm) {
case BT_8723B_1ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = SCO.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = SCO\n");
halbtc8723b1ant_action_sco(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID\n");
halbtc8723b1ant_action_hid(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = A2DP\n");
halbtc8723b1ant_action_a2dp(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP+PAN(HS).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
halbtc8723b1ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN(EDR)\n");
halbtc8723b1ant_action_pan_edr(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HS mode.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HS mode\n");
halbtc8723b1ant_action_pan_hs(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN+A2DP\n");
halbtc8723b1ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR)+HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
halbtc8723b1ant_action_pan_edr_hid(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP+PAN.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
btc8723b1ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8723B_1ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID+A2DP\n");
halbtc8723b1ant_action_hid_a2dp(btcoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = coexist All Off!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = coexist All Off!!\n");
break;
}
coex_dm->pre_algorithm = coex_dm->cur_algorithm;
@@ -2171,24 +2158,24 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u32 wifi_link_status = 0;
u32 num_of_wifi_link = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (btcoexist->stop_coex_dm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
return;
}
if (coex_sta->under_ips) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
@@ -2267,8 +2254,8 @@ static void halbtc8723b1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!wifi_connected) {
bool scan = false, link = false, roam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is non connected-idle !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is non connected-idle !!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2305,8 +2292,8 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
u8 u8tmp = 0;
u32 cnt_bt_cal_chk = 0;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], 1Ant Init HW Config!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], 1Ant Init HW Config!!\n");
if (backup) {/* backup rf 0x1e value */
coex_dm->backup_arfr_cnt1 =
@@ -2333,14 +2320,14 @@ static void halbtc8723b1ant_init_hw_config(struct btc_coexist *btcoexist,
u32tmp = btcoexist->btc_read_4byte(btcoexist, 0x49d);
cnt_bt_cal_chk++;
if (u32tmp & BIT0) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
- cnt_bt_cal_chk);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ########### BT calibration(cnt=%d) ###########\n",
+ cnt_bt_cal_chk);
mdelay(50);
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
- cnt_bt_cal_chk);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ********** BT NOT calibration (cnt=%d)**********\n",
+ cnt_bt_cal_chk);
break;
}
}
@@ -2383,8 +2370,8 @@ void ex_halbtc8723b1ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_halbtc8723b1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
btcoexist->stop_coex_dm = false;
@@ -2677,8 +2664,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT,
@@ -2689,8 +2676,8 @@ void ex_halbtc8723b1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
NORMAL_EXEC, 0);
halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
halbtc8723b1ant_init_hw_config(btcoexist, false);
@@ -2705,12 +2692,12 @@ void ex_halbtc8723b1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -2753,15 +2740,15 @@ void ex_halbtc8723b1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_SCAN_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
if (!wifi_connected) /* non-connected scan */
btc8723b1ant_action_wifi_not_conn_scan(btcoexist);
else /* wifi is connected */
btc8723b1ant_action_wifi_conn_scan(btcoexist);
} else if (BTC_SCAN_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected) /* non-connected scan */
btc8723b1ant_action_wifi_not_conn(btcoexist);
else
@@ -2802,12 +2789,12 @@ void ex_halbtc8723b1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_ASSOCIATE_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
btc8723b1ant_act_wifi_not_conn_asso_auth(btcoexist);
} else if (BTC_ASSOCIATE_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
@@ -2830,11 +2817,11 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist, BTC_GET_U1_WIFI_CENTRAL_CHNL,
@@ -2855,10 +2842,10 @@ void ex_halbtc8723b1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -2900,8 +2887,8 @@ void ex_halbtc8723b1ant_special_packet_notify(struct btc_coexist *btcoexist,
if (BTC_PACKET_DHCP == type ||
BTC_PACKET_EAPOL == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], special Packet(%d) notify\n", type);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], special Packet(%d) notify\n", type);
halbtc8723b1ant_action_wifi_connected_special_packet(btcoexist);
}
}
@@ -2921,19 +2908,19 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8723B_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length=%d, hex data = [",
- rsp_source, length);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length - 1)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
}
if (BT_INFO_SRC_8723B_1ANT_WIFI_FW != rsp_source) {
@@ -2950,8 +2937,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
* because bt is reset and loss of the info.
*/
if (coex_sta->bt_info_ext & BIT1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -2965,8 +2952,8 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (coex_sta->bt_info_ext & BIT3) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, set BT NOT ignore Wlan active!!\n");
halbtc8723b1ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -3021,30 +3008,30 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info&BT_INFO_8723B_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!\n");
/* connection exists but no busy */
} else if (bt_info == BT_INFO_8723B_1ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8723B_1ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8723B_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_SCO_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info & BT_INFO_8723B_1ANT_B_ACL_BUSY) {
if (BT_8723B_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8723B_1ANT_BT_STATUS_ACL_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status =
BT_8723B_1ANT_BT_STATUS_MAX;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!\n");
}
if ((BT_8723B_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -3060,7 +3047,7 @@ void ex_halbtc8723b1ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+ btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
btcoexist->stop_coex_dm = true;
@@ -3078,11 +3065,11 @@ void ex_halbtc8723b1ant_halt_notify(struct btc_coexist *btcoexist)
void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Pnp notify\n");
+ btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Pnp notify\n");
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Pnp notify to SLEEP\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Pnp notify to SLEEP\n");
btcoexist->stop_coex_dm = true;
halbtc8723b1ant_SetAntPath(btcoexist, BTC_ANT_PATH_BT, false,
true);
@@ -3092,8 +3079,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
halbtc8723b1ant_coex_table_with_type(btcoexist, NORMAL_EXEC, 2);
halbtc8723b1ant_wifi_off_hw_cfg(btcoexist);
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
halbtc8723b1ant_init_hw_config(btcoexist, false);
halbtc8723b1ant_init_coex_dm(btcoexist);
@@ -3103,8 +3090,8 @@ void ex_halbtc8723b1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
void ex_halbtc8723b1ant_coex_dm_reset(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], *****************Coex DM Reset****************\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], *****************Coex DM Reset****************\n");
halbtc8723b1ant_init_hw_config(btcoexist, false);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff, 0x0);
@@ -3119,31 +3106,31 @@ void ex_halbtc8723b1ant_periodical(struct btc_coexist *btcoexist)
static u8 dis_ver_info_cnt;
u32 fw_ver = 0, bt_patch_ver = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], ==========================Periodical===========================\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num, board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num, board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8723b_1ant,
- glcoex_ver_8723b_1ant, fw_ver,
- bt_patch_ver, bt_patch_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8723b_1ant,
+ glcoex_ver_8723b_1ant, fw_ver,
+ bt_patch_ver, bt_patch_ver);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8723B_1ANT == 0)
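The conversion applied throughout this file (and continued in halbtc8723b2ant.c below) is mechanical: every two-selector BTC_PRINT(msg_type, dbg_level, fmt, ...) call collapses into a per-type helper, btc_alg_dbg() for BTC_MSG_ALGORITHM and btc_iface_dbg() for BTC_MSG_INTERFACE, keeping the debug level as the first argument and rejoining format strings that had been split across lines. A minimal sketch of how such helpers can be defined is shown here for illustration only; the btc_dbg_type[] array and the printk routing are assumptions, and the in-tree definitions may differ:

	/* Illustrative sketch, not the in-tree definition.
	 * btc_dbg_type[] is an assumed per-class bitmask of
	 * enabled debug levels.
	 */
	#define btc_alg_dbg(dbg_level, fmt, ...)			\
		do {							\
			if (btc_dbg_type[BTC_MSG_ALGORITHM] & (dbg_level)) \
				printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
		} while (0)

	#define btc_iface_dbg(dbg_level, fmt, ...)			\
		do {							\
			if (btc_dbg_type[BTC_MSG_INTERFACE] & (dbg_level)) \
				printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
		} while (0)

Because the do { } while (0) wrapper expands to a single statement, the converted calls stay safe inside the unbraced if/else branches that several hunks above rely on (for example the TDMA(on)/TDMA(off) traces in halbtc8723b1ant_ps_tdma).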
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
index 77cbd10e8..5f488ecae 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8723b2ant.c
@@ -72,32 +72,28 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -106,14 +102,12 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -122,31 +116,26 @@ static u8 btc8723b2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "stay at Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state "
- "stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
@@ -173,36 +162,28 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -213,16 +194,12 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -231,36 +208,26 @@ static u8 btc8723b2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8723B_2ANT) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "stay at Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state "
- "stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -292,12 +259,12 @@ static void btc8723b2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], High Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], Low Priority Tx/Rx(reg 0x%x)=0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -311,9 +278,9 @@ static void btc8723b2ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -427,8 +394,8 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], No BT link exists!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -443,27 +410,27 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2DP only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2DP only\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], PAN(HS) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], PAN(HS) only\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], PAN(EDR) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], PAN(EDR) only\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR;
}
@@ -472,21 +439,21 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP ==> SCO\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + PAN(HS)\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -494,31 +461,31 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + PAN(HS)\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex],A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex],A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP;
}
@@ -528,37 +495,32 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP"
- " ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP ==> HID\n");
algorithm = BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + "
- "PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + "
- "PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP + "
- "PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP + "
- "PAN(EDR) ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -568,15 +530,13 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP + "
- "PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP + PAN(HS)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP + "
- "PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP + PAN(EDR)\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
@@ -588,13 +548,11 @@ static u8 btc8723b2ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Error!!! SCO + HID"
- " + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP +"
- " PAN(EDR)==>PAN(EDR)+HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm =
BT_8723B_2ANT_COEX_ALGO_PANEDR_HID;
}
@@ -624,17 +582,15 @@ static bool btc8723b_need_dec_pwr(struct btc_coexist *btcoexist)
if (wifi_connected) {
if (bt_hs_on) {
if (bt_hs_rssi > 37) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt "
- "power for HS mode!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt power for HS mode!!\n");
ret = true;
}
} else {
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt "
- "power for Wifi is connected!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
ret = true;
}
}
@@ -653,10 +609,10 @@ static void btc8723b2ant_set_fw_dac_swing_level(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level=0x%x\n", dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64=0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -671,9 +627,9 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
if (dec_bt_pwr)
h2c_parameter[0] |= BIT1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
- (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62=0x%x\n",
+ (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -681,15 +637,15 @@ static void btc8723b2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
bool force_exec, bool dec_bt_pwr)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s Dec BT power = %s\n",
- (force_exec ? "force to" : ""), (dec_bt_pwr ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power = %s\n",
+ force_exec ? "force to" : "", dec_bt_pwr ? "ON" : "OFF");
coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreDecBtPwr=%d, bCurDecBtPwr=%d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
return;
@@ -702,17 +658,16 @@ static void btc8723b2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
static void btc8723b2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
bool force_exec, u8 fw_dac_swing_lvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], preFwDacSwingLvl=%d, "
- "curFwDacSwingLvl=%d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], preFwDacSwingLvl=%d, curFwDacSwingLvl=%d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -729,16 +684,16 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
{
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
/* Resume RF Rx LPF corner */
/* After initialized, we can use coex_dm->btRf0x1eBackup */
if (btcoexist->initilized) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -749,18 +704,17 @@ static void btc8723b2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
static void btc8723b2ant_rf_shrink(struct btc_coexist *btcoexist,
bool force_exec, bool rx_rf_shrink_on)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
- "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""), (rx_rf_shrink_on ?
+ "ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreRfRxLpfShrink=%d, "
- "bCurRfRxLpfShrink=%d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreRfRxLpfShrink=%d, bCurRfRxLpfShrink=%d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
if (coex_dm->pre_rf_rx_lpf_shrink ==
coex_dm->cur_rf_rx_lpf_shrink)
@@ -788,9 +742,9 @@ static void btc8723b_set_penalty_txrate(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -799,18 +753,17 @@ static void btc8723b2ant_low_penalty_ra(struct btc_coexist *btcoexist,
bool force_exec, bool low_penalty_ra)
{
/*return; */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (force_exec ? "force to" : ""), (low_penalty_ra ?
- "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""), (low_penalty_ra ?
+ "ON" : "OFF"));
coex_dm->cur_low_penalty_ra = low_penalty_ra;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreLowPenaltyRa=%d, "
- "bCurLowPenaltyRa=%d\n",
- coex_dm->pre_low_penalty_ra,
- coex_dm->cur_low_penalty_ra);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreLowPenaltyRa=%d, bCurLowPenaltyRa=%d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
@@ -824,8 +777,8 @@ static void btc8723b2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
u32 level)
{
u8 val = (u8) level;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x883, 0x3e, val);
}
@@ -843,20 +796,20 @@ static void btc8723b2ant_dac_swing(struct btc_coexist *btcoexist,
bool force_exec, bool dac_swing_on,
u32 dac_swing_lvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
- (force_exec ? "force to" : ""),
- (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing=%s, dac_swing_lvl=0x%x\n",
+ (force_exec ? "force to" : ""),
+ (dac_swing_on ? "ON" : "OFF"), dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x,"
- " bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
- coex_dm->pre_dac_swing_on, coex_dm->pre_dac_swing_lvl,
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreDacSwingOn=%d, preDacSwingLvl=0x%x, bCurDacSwingOn=%d, curDacSwingLvl=0x%x\n",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl == coex_dm->cur_dac_swing_lvl))
@@ -877,8 +830,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
/* BB AGC Gain Table */
if (agc_table_en) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table On!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table On!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6e1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6d1B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x6c1C0001);
@@ -887,8 +840,8 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x691F0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0x68200001);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB Agc Table Off!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB Agc Table Off!\n");
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xaa1A0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa91B0001);
btcoexist->btc_write_4byte(btcoexist, 0xc78, 0xa81C0001);
@@ -901,15 +854,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
/* RF Gain */
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xef, 0xfffff, 0x02000);
if (agc_table_en) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table On!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
0xfffff, 0x38fff);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
0xfffff, 0x38ffe);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table Off!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
0xfffff, 0x380c3);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x3b,
@@ -920,15 +873,15 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0xed, 0xfffff, 0x1);
if (agc_table_en) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table On!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table On!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
0xfffff, 0x38fff);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
0xfffff, 0x38ffe);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Agc Table Off!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Agc Table Off!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
0xfffff, 0x380c3);
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x40,
@@ -946,16 +899,17 @@ static void btc8723b2ant_set_agc_table(struct btc_coexist *btcoexist,
static void btc8723b2ant_agc_table(struct btc_coexist *btcoexist,
bool force_exec, bool agc_table_en)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s %s Agc Table\n",
- (force_exec ? "force to" : ""),
- (agc_table_en ? "Enable" : "Disable"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s %s Agc Table\n",
+ (force_exec ? "force to" : ""),
+ (agc_table_en ? "Enable" : "Disable"));
coex_dm->cur_agc_table_en = agc_table_en;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
- coex_dm->pre_agc_table_en, coex_dm->cur_agc_table_en);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], bPreAgcTableEn=%d, bCurAgcTableEn=%d\n",
+ coex_dm->pre_agc_table_en,
+ coex_dm->cur_agc_table_en);
if (coex_dm->pre_agc_table_en == coex_dm->cur_agc_table_en)
return;
@@ -969,20 +923,20 @@ static void btc8723b2ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0=0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4=0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8=0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc=0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -991,29 +945,24 @@ static void btc8723b2ant_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c4, u32 val0x6c8,
u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0=0x%x,"
- " 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
- (force_exec ? "force to" : ""), val0x6c0,
- val0x6c4, val0x6c8, val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0=0x%x, 0x6c4=0x%x, 0x6c8=0x%x, 0x6cc=0x%x\n",
+ force_exec ? "force to" : "",
+ val0x6c0, val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], preVal0x6c0=0x%x, "
- "preVal0x6c4=0x%x, preVal0x6c8=0x%x, "
- "preVal0x6cc=0x%x !!\n",
- coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
- coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], curVal0x6c0=0x%x, "
- "curVal0x6c4=0x%x, curVal0x6c8=0x%x, "
- "curVal0x6cc=0x%x !!\n",
- coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
- coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], preVal0x6c0=0x%x, preVal0x6c4=0x%x, preVal0x6c8=0x%x, preVal0x6cc=0x%x !!\n",
+ coex_dm->pre_val0x6c0, coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8, coex_dm->pre_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], curVal0x6c0=0x%x, curVal0x6c4=0x%x, curVal0x6c8=0x%x, curVal0x6cc=0x%x !!\n",
+ coex_dm->cur_val0x6c0, coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8, coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1099,9 +1048,9 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0;/* function enable*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, "
- "FW write 0x63=0x%x\n", h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63=0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
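
The 0x63 write above is a one-byte host-to-card (H2C) command whose bit0 toggles "ignore WLAN activity" in the BT firmware. A stub model of the call, assuming only what the hunk shows (opcode 0x63, length 1, BIT0 as the enable flag):

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        #define BIT0 0x01

        /* stand-in for btcoexist->btc_fill_h2c(): just dump the payload */
        static void fill_h2c(uint8_t cmd, int len, const uint8_t *par)
        {
                printf("H2C 0x%02x:", cmd);
                for (int i = 0; i < len; i++)
                        printf(" 0x%02x", par[i]);
                printf("\n");
        }

        static void set_fw_ignore_wlan_act(bool enable)
        {
                uint8_t h2c_parameter[1] = { 0 };

                if (enable)
                        h2c_parameter[0] |= BIT0;       /* function enable */
                fill_h2c(0x63, 1, h2c_parameter);
        }

        int main(void)
        {
                set_fw_ignore_wlan_act(true);           /* H2C 0x63: 0x01 */
                set_fw_ignore_wlan_act(false);          /* H2C 0x63: 0x00 */
                return 0;
        }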
@@ -1109,17 +1058,16 @@ static void btc8723b2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
static void btc8723b2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPreIgnoreWlanAct = %d, "
- "bCurIgnoreWlanAct = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPreIgnoreWlanAct = %d, bCurIgnoreWlanAct = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1147,11 +1095,11 @@ static void btc8723b2ant_set_fw_ps_tdma(struct btc_coexist *btcoexist, u8 byte1,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
- h2c_parameter[3] << 8 | h2c_parameter[4]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes)=0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 | h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 | h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
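
The trace in this hunk prints the five PS-TDMA bytes as one leading byte plus a packed 32-bit tail, which is exactly what the shift/or expression builds. A worked example with made-up parameter bytes:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* example PS-TDMA bytes; real values come from the TDMA tables */
                uint8_t p[5] = { 0xe3, 0x1a, 0x1a, 0x1a, 0xa0 };
                uint32_t tail = (uint32_t)p[1] << 24 | (uint32_t)p[2] << 16 |
                                (uint32_t)p[3] << 8  | (uint32_t)p[4];

                /* reproduces the "FW write 0x60(5bytes)=0x..." trace format */
                printf("FW write 0x60(5bytes)=0x%x%08x\n", p[0], tail);
                return 0;
        }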
@@ -1260,20 +1208,20 @@ static void btc8723b2ant_set_ant_path(struct btc_coexist *btcoexist,
static void btc8723b2ant_ps_tdma(struct btc_coexist *btcoexist, bool force_exec,
bool turn_on, u8 type)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn %s PS TDMA, type=%d\n",
- (force_exec ? "force to" : ""),
- (turn_on ? "ON" : "OFF"), type);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type=%d\n",
+ (force_exec ? "force to" : ""),
+ (turn_on ? "ON" : "OFF"), type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], bPrePsTdmaOn = %d, bCurPsTdmaOn = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], prePsTdma = %d, curPsTdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1471,8 +1419,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non-connected idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non-connected idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1, 0xfffff,
0x0);
@@ -1495,9 +1443,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + "
- "BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
@@ -1523,9 +1470,8 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
if (bt_hs_on)
return false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + "
- "BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1,
0xfffff, 0x0);
@@ -1549,17 +1495,15 @@ static bool btc8723b2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Busy + "
- "BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
common = false;
} else {
if (bt_hs_on)
return false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Idle + "
- "BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
0x1, 0xfffff, 0x0);
@@ -1597,9 +1541,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
{
/* Set PS TDMA for max interval == 1 */
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 71) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1695,9 +1638,8 @@ static void set_tdma_int1(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 71);
coex_dm->tdma_adj_type = 71;
@@ -1795,9 +1737,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
{
/* Set PS TDMA for max interval == 2 */
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 6);
coex_dm->tdma_adj_type = 6;
@@ -1878,9 +1819,8 @@ static void set_tdma_int2(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->tdma_adj_type = 2;
@@ -1968,9 +1908,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
{
/* Set PS TDMA for max interval == 3 */
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 7);
coex_dm->tdma_adj_type = 7;
@@ -2051,9 +1990,8 @@ static void set_tdma_int3(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 3);
coex_dm->tdma_adj_type = 3;
@@ -2145,13 +2083,13 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
s32 result;
u8 retry_count = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjust()\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -2255,11 +2193,11 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
} else {
/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], retry_count = %d\n", retry_count);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
- up, dn, m, n, wait_count);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up=%d, dn=%d, m=%d, n=%d, wait_count=%d\n",
+ up, dn, m, n, wait_count);
result = 0;
wait_count++;
/* no retry in the last 2-second duration*/
@@ -2276,10 +2214,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi "
- "duration!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi duration!!\n");
} /* <=3 retry in the last 2-second duration*/
} else if (retry_count <= 3) {
up--;
@@ -2302,10 +2238,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration "
- "for retry_counter<3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retry_counter<3!!\n");
}
} else {
if (wait_count == 1)
@@ -2321,13 +2255,12 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration "
- "for retry_counter>3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retry_counter>3!!\n");
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], max Interval = %d\n", max_interval);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1)
set_tdma_int1(btcoexist, tx_pause, result);
else if (max_interval == 2)
@@ -2341,10 +2274,9 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
*/
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], PsTdma type dismatch!!!, "
- "curPsTdma=%d, recordPsTdma=%d\n",
- coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], PsTdma type dismatch!!!, curPsTdma=%d, recordPsTdma=%d\n",
+ coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2354,9 +2286,8 @@ static void btc8723b2ant_tdma_duration_adjust(struct btc_coexist *btcoexist,
btc8723b2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
coex_dm->tdma_adj_type);
else
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], roaming/link/scan is under"
- " progress, will adjust next time!!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
}
}
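
The prints converted across these hunks trace a feedback loop: in each 2-second window the BT retry count votes, consistently clean windows widen the wifi TDMA slot, and a retry burst narrows it immediately. A condensed, self-contained model of that accounting (the driver's m/n window-stretching and tdma_adj_type bookkeeping are deliberately dropped):

        #include <stdio.h>

        /* +1 widens the wifi slot, -1 narrows it, 0 holds */
        static int up, dn, wait_count;

        static int tdma_adjust_step(int retry_count)
        {
                wait_count++;
                if (retry_count == 0) {                 /* clean 2s window */
                        up++;
                        if (dn > 0)
                                dn--;
                        if (up >= 2) {                  /* consistently clean: widen */
                                up = 0;
                                dn = 0;
                                wait_count = 0;
                                return 1;
                        }
                } else if (retry_count <= 3) {          /* light retries */
                        if (up > 0)
                                up--;
                        dn++;
                        if (dn >= 2) {                  /* repeated retries: narrow */
                                up = 0;
                                dn = 0;
                                wait_count = 0;
                                return -1;
                        }
                } else {                                /* heavy retries: narrow now */
                        up = 0;
                        dn = 0;
                        wait_count = 0;
                        return -1;
                }
                return 0;
        }

        int main(void)
        {
                int retries[] = { 0, 0, 1, 2, 7, 0 };

                for (int i = 0; i < 6; i++)
                        printf("retry_count=%d -> result=%d\n",
                               retries[i], tdma_adjust_step(retries[i]));
                return 0;
        }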
@@ -2994,27 +2925,26 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
{
u8 algorithm = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), "
- "return for Manual CTRL <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (coex_sta->under_ips) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
algorithm = btc8723b2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8723B_2ANT_COEX_ALGO_PANHS != algorithm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
btc8723b2ant_action_bt_inquiry(btcoexist);
return;
} else {
@@ -3026,84 +2956,75 @@ static void btc8723b2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
}
coex_dm->cur_algorithm = algorithm;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
- coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE, "[BTCoex], Algorithm = %d\n",
+ coex_dm->cur_algorithm);
if (btc8723b2ant_is_common_action(btcoexist)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant common.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->auto_tdma_adjust = false;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], preAlgorithm=%d, "
- "curAlgorithm=%d\n", coex_dm->pre_algorithm,
- coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], preAlgorithm=%d, curAlgorithm=%d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->auto_tdma_adjust = false;
}
switch (coex_dm->cur_algorithm) {
case BT_8723B_2ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = SCO\n");
btc8723b2ant_action_sco(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID\n");
btc8723b2ant_action_hid(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
btc8723b2ant_action_a2dp(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = A2DP+PAN(HS).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
btc8723b2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
btc8723b2ant_action_pan_edr(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = HS mode.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
btc8723b2ant_action_pan_hs(btcoexist);
- break;
+ break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = PAN+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
btc8723b2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = PAN(EDR)+HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
btc8723b2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = HID+A2DP+PAN.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8723b2ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8723B_2ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = HID+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
btc8723b2ant_action_hid_a2dp(btcoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, "
- "algorithm = coexist All Off!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
btc8723b2ant_coex_alloff(btcoexist);
break;
}
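
The switch closed above maps each profile-derived algorithm id to exactly one action handler. A table-driven equivalent, using an illustrative subset of the BT_8723B_2ANT_COEX_ALGO_* ids:

        #include <stdio.h>

        enum coex_algo { ALGO_SCO, ALGO_HID, ALGO_A2DP, ALGO_MAX };

        static void action_sco(void)  { printf("SCO action\n"); }
        static void action_hid(void)  { printf("HID action\n"); }
        static void action_a2dp(void) { printf("A2DP action\n"); }

        /* table-driven equivalent of the switch these traces bracket */
        static void (*const actions[ALGO_MAX])(void) = {
                [ALGO_SCO]  = action_sco,
                [ALGO_HID]  = action_hid,
                [ALGO_A2DP] = action_a2dp,
        };

        int main(void)
        {
                enum coex_algo cur = ALGO_HID;

                if (cur < ALGO_MAX && actions[cur])
                        actions[cur]();         /* HID action */
                return 0;
        }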
@@ -3131,8 +3052,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
{
u8 u8tmp = 0;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
coex_dm->bt_rf0x1e_backup =
btcoexist->btc_get_rf_reg(btcoexist, BTC_RF_A, 0x1e, 0xfffff);
@@ -3157,8 +3078,8 @@ void ex_btc8723b2ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_btc8723b2ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
btc8723b2ant_init_coex_dm(btcoexist);
}
@@ -3393,15 +3314,15 @@ void ex_btc8723b2ant_display_coex_info(struct btc_coexist *btcoexist)
void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
btc8723b2ant_wifioff_hwcfg(btcoexist);
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
btc8723b2ant_coex_alloff(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
ex_btc8723b2ant_init_hwconfig(btcoexist);
btc8723b2ant_init_coex_dm(btcoexist);
@@ -3412,12 +3333,12 @@ void ex_btc8723b2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -3425,21 +3346,21 @@ void ex_btc8723b2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
void ex_btc8723b2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_SCAN_START == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
else if (BTC_SCAN_FINISH == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
}
void ex_btc8723b2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_ASSOCIATE_START == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
else if (BTC_ASSOCIATE_FINISH == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
}
void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
@@ -3450,11 +3371,11 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 wifi_central_chnl;
if (BTC_MEDIA_CONNECT == type)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
/* only 2.4G we need to inform bt the chnl mask */
btcoexist->btc_get(btcoexist,
@@ -3475,10 +3396,10 @@ void ex_btc8723b2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66=0x%x\n",
- h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
- h2c_parameter[2]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66=0x%x\n",
+ h2c_parameter[0] << 16 | h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
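
The 0x66 command packs the 2.4G channel information into three bytes, and the trace prints them as one value. A worked example; the byte meanings chosen here are hypothetical, only the packing mirrors the hunk:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                /* hypothetical channel info: flag byte, channel 6, BW mask */
                uint8_t h2c_parameter[3] = { 0x01, 0x06, 0x20 };
                uint32_t val = (uint32_t)h2c_parameter[0] << 16 |
                               (uint32_t)h2c_parameter[1] << 8 |
                               h2c_parameter[2];

                printf("FW write 0x66=0x%x\n", val);    /* 0x10620 */
                return 0;
        }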
@@ -3487,8 +3408,8 @@ void ex_btc8723b2ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type)
{
if (type == BTC_PACKET_DHCP)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], DHCP Packet notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
}
void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
@@ -3506,25 +3427,24 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8723B_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length=%d, hex data=[",
- rsp_source, length);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length=%d, hex data=[",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmpbuf[i];
if (i == 1)
bt_info = tmpbuf[i];
if (i == length-1)
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x]\n", tmpbuf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x]\n", tmpbuf[i]);
else
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x, ", tmpbuf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x, ", tmpbuf[i]);
}
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), "
- "return for Manual CTRL<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), return for Manual CTRL<===\n");
return;
}
@@ -3542,9 +3462,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
because bt is reset and loss of the info.
*/
if ((coex_sta->bt_info_ext & BIT1)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit1 check,"
- " send wifi BW&Chnl to BT!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
if (wifi_connected)
@@ -3558,9 +3477,8 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
}
if ((coex_sta->bt_info_ext & BIT3)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit3 check, "
- "set BT NOT to ignore Wlan active!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC,
false);
} else {
@@ -3613,28 +3531,26 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info & BT_INFO_8723B_2ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_NON_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), "
- "BT Non-Connected idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
/* connection exists but no busy */
} else if (bt_info == BT_INFO_8723B_2ANT_B_CONNECTION) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info & BT_INFO_8723B_2ANT_B_SCO_ESCO) ||
(bt_info & BT_INFO_8723B_2ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_SCO_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8723B_2ANT_B_ACL_BUSY) {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_ACL_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8723B_2ANT_BT_STATUS_MAX;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), "
- "BT Non-Defined state!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8723B_2ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
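
The classification chain above decodes the bt_info status byte in priority order: no-connection first, then the exact connected-idle value, then the SCO and ACL busy bits. A standalone model with illustrative bit masks (the real ones are the BT_INFO_8723B_2ANT_B_* defines in the driver header):

        #include <stdint.h>
        #include <stdio.h>

        /* illustrative bit layout for the bt_info status byte */
        #define B_CONNECTION 0x01
        #define B_SCO_ESCO   0x02
        #define B_ACL_BUSY   0x08

        static const char *classify(uint8_t bt_info)
        {
                if (!(bt_info & B_CONNECTION))
                        return "non-connected idle";
                if (bt_info == B_CONNECTION)
                        return "connected idle";
                if (bt_info & B_SCO_ESCO)
                        return "SCO busy";
                if (bt_info & B_ACL_BUSY)
                        return "ACL busy";
                return "non-defined";
        }

        int main(void)
        {
                printf("%s\n", classify(0x00)); /* non-connected idle */
                printf("%s\n", classify(0x01)); /* connected idle */
                printf("%s\n", classify(0x03)); /* SCO busy */
                printf("%s\n", classify(0x09)); /* ACL busy */
                return 0;
        }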
@@ -3657,7 +3573,7 @@ void ex_btc8723b2ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_btc8723b2ant_halt_notify(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY, "[BTCoex], Halt notify\n");
+ btc_iface_dbg(INTF_NOTIFY, "[BTCoex], Halt notify\n");
btc8723b2ant_wifioff_hwcfg(btcoexist);
btc8723b2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
@@ -3671,33 +3587,31 @@ void ex_btc8723b2ant_periodical(struct btc_coexist *btcoexist)
static u8 dis_ver_info_cnt;
u32 fw_ver = 0, bt_patch_ver = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], =========================="
- "Periodical===========================\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************"
- "************************************\n");
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ "
- "Ant Pos = %d/ %d/ %d\n", board_info->pg_ant_num,
- board_info->btdm_ant_num, board_info->btdm_ant_pos);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], *****************************"
- "***********************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], CoexVer/ fw_ver/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8723b_2ant, glcoex_ver_8723b_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8723B_2ANT == 0)
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
index 9cecf174a..3ce47c70b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a1ant.c
@@ -76,28 +76,28 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -106,12 +106,12 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= (rssi_thresh +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -120,26 +120,26 @@ static u8 halbtc8821a1ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
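
These RSSI traces come from a hysteresis classifier: leaving the Low state requires the RSSI to clear the threshold plus a tolerance, while falling back only requires dropping below the threshold, so readings near the boundary do not flap between states. A two-level model (the tolerance value here is illustrative; the driver's constant is BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT):

        #include <stdio.h>

        enum rssi_state { RSSI_LOW, RSSI_HIGH };

        #define RSSI_TOL 2      /* hysteresis tolerance; illustrative value */

        static enum rssi_state rssi_update(enum rssi_state prev, int rssi,
                                           int thresh)
        {
                if (prev == RSSI_LOW)
                        return rssi >= thresh + RSSI_TOL ? RSSI_HIGH : RSSI_LOW;
                return rssi < thresh ? RSSI_LOW : RSSI_HIGH;
        }

        int main(void)
        {
                enum rssi_state s = RSSI_LOW;
                int samples[] = { 34, 36, 37, 36, 34, 33 };

                /* with thresh 35: 36 stays Low (needs >= 37), 37 goes High,
                 * 36 stays High, 34 drops back to Low */
                for (int i = 0; i < 6; i++) {
                        s = rssi_update(s, samples[i], 35);
                        printf("rssi=%d -> %s\n", samples[i],
                               s == RSSI_HIGH ? "High" : "Low");
                }
                return 0;
        }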
@@ -165,32 +165,28 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -201,14 +197,12 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -218,31 +212,26 @@ static u8 halbtc8821a1ant_WifiRssiState(struct btc_coexist *btcoexist,
(rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_1ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -431,9 +420,9 @@ static void halbtc8821a1ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -504,8 +493,8 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_HS_OPERATION, &bt_hs_on);
if (!bt_link_info->bt_link_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], No BT link exists!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], No BT link exists!!!\n");
return algorithm;
}
@@ -520,26 +509,26 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (bt_link_info->sco_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(HS) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = PAN(HS) only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = PAN(EDR) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = PAN(EDR) only\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR;
}
}
@@ -547,50 +536,50 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP ==> SCO\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else if (bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
} else {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = A2DP + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP;
}
}
@@ -599,29 +588,29 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
if (bt_link_info->sco_exist) {
if (bt_link_info->hid_exist &&
bt_link_info->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID;
} else if (bt_link_info->hid_exist &&
bt_link_info->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
} else if (bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -630,12 +619,12 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(HS)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = HID + A2DP + PAN(EDR)\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
}
@@ -646,12 +635,12 @@ static u8 halbtc8821a1ant_action_algorithm(struct btc_coexist *btcoexist)
bt_link_info->pan_exist &&
bt_link_info->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Error!!! BT Profile = SCO + HID + A2DP + PAN(HS)\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT Profile = SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm = BT_8821A_1ANT_COEX_ALGO_PANEDR_HID;
}
}
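
The whole block above keys the algorithm choice first on how many BT profiles are simultaneously active and then on the exact combination, with PAN further split by whether BT runs as a hotspot (bt_hs_on). A sketch of the counting step, using illustrative flag bits in place of the driver's separate bools (sco_exist, hid_exist, a2dp_exist, pan_exist):

        #include <stdio.h>

        #define P_SCO  0x1
        #define P_HID  0x2
        #define P_A2DP 0x4
        #define P_PAN  0x8

        static int num_of_diff_profile(unsigned int p)
        {
                int n = 0;

                for (; p; p >>= 1)      /* count the set profile bits */
                        n += p & 1;
                return n;
        }

        int main(void)
        {
                unsigned int profiles = P_HID | P_A2DP;

                printf("num_of_diff_profile = %d\n",
                       num_of_diff_profile(profiles));          /* 2 */
                if (profiles == (P_HID | P_A2DP))
                        printf("BT Profile = HID + A2DP\n");
                return 0;
        }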
@@ -670,10 +659,10 @@ static void halbtc8821a1ant_set_bt_auto_report(struct btc_coexist *btcoexist,
if (enable_auto_report)
h2c_parameter[0] |= BIT0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_auto_report ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
}
@@ -682,17 +671,16 @@ static void halbtc8821a1ant_bt_auto_report(struct btc_coexist *btcoexist,
bool force_exec,
bool enable_auto_report)
{
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""), ((enable_auto_report) ?
- "Enabled" : "Disabled"));
+ btc_alg_dbg(ALGO_TRACE_FW, "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""), ((enable_auto_report) ?
+ "Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_auto_report;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
return;
@@ -718,9 +706,9 @@ static void btc8821a1ant_set_sw_pen_tx_rate(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9; /*MCS5 or OFDM36*/
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -743,20 +731,20 @@ static void halbtc8821a1ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -764,10 +752,10 @@ static void halbtc8821a1ant_coex_table(struct btc_coexist *btcoexist,
bool force_exec, u32 val0x6c0,
u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
- val0x6c8, val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""), val0x6c0, val0x6c4,
+ val0x6c8, val0x6cc);
coex_dm->cur_val_0x6c0 = val0x6c0;
coex_dm->cur_val_0x6c4 = val0x6c4;
coex_dm->cur_val_0x6c8 = val0x6c8;
@@ -839,9 +827,9 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
if (enable)
h2c_parameter[0] |= BIT0; /* function enable*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x63, 1, h2c_parameter);
}
@@ -849,16 +837,16 @@ static void btc8821a1ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoexist,
static void halbtc8821a1ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -887,13 +875,13 @@ static void halbtc8821a1ant_set_fw_pstdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1]<<24 |
- h2c_parameter[2]<<16 |
- h2c_parameter[3]<<8 |
- h2c_parameter[4]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], PS-TDMA H2C cmd =0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -910,22 +898,22 @@ static void halbtc8821a1ant_set_lps_rpwm(struct btc_coexist *btcoexist,
static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
bool force_exec, u8 lps_val, u8 rpwm_val)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
- (force_exec ? "force to" : ""), lps_val, rpwm_val);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s set lps/rpwm = 0x%x/0x%x\n",
+ (force_exec ? "force to" : ""), lps_val, rpwm_val);
coex_dm->cur_lps = lps_val;
coex_dm->cur_rpwm = rpwm_val;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
- coex_dm->cur_lps, coex_dm->cur_rpwm);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RxBeaconMode = 0x%x, LPS-RPWM = 0x%x!!\n",
+ coex_dm->cur_lps, coex_dm->cur_rpwm);
if ((coex_dm->pre_lps == coex_dm->cur_lps) &&
(coex_dm->pre_rpwm == coex_dm->cur_rpwm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
- coex_dm->pre_rpwm, coex_dm->cur_rpwm);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], LPS-RPWM_Last = 0x%x, LPS-RPWM_Now = 0x%x!!\n",
+ coex_dm->pre_rpwm, coex_dm->cur_rpwm);
return;
}
@@ -939,8 +927,8 @@ static void halbtc8821a1ant_lps_rpwm(struct btc_coexist *btcoexist,
static void halbtc8821a1ant_sw_mechanism(struct btc_coexist *btcoexist,
bool low_penalty_ra)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], SM[LpRA] = %d\n", low_penalty_ra);
halbtc8821a1ant_low_penalty_ra(btcoexist, NORMAL_EXEC, low_penalty_ra);
}
@@ -1036,13 +1024,13 @@ static void halbtc8821a1ant_ps_tdma(struct btc_coexist *btcoexist,
if (!force_exec) {
if (coex_dm->cur_ps_tdma_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ********** TDMA(on, %d) **********\n",
+ coex_dm->cur_ps_tdma);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ********** TDMA(off, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ********** TDMA(off, %d) **********\n",
+ coex_dm->cur_ps_tdma);
}
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1253,50 +1241,50 @@ static bool halbtc8821a1ant_is_common_action(struct btc_coexist *btcoexist)
if (!wifi_connected &&
BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT non connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + BT non connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT non connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE ==
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi connected + BT connected-idle!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi connected + BT connected-idle!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else if (!wifi_connected &&
(BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE !=
coex_dm->bt_status)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi non connected-idle + BT Busy!!\n");
halbtc8821a1ant_sw_mechanism(btcoexist, false);
common = true;
} else {
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Busy + BT Busy!!\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Connected-Idle + BT Busy!!\n");
}
common = false;
@@ -1313,8 +1301,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
long result;
u8 retry_count = 0, bt_info_ext;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjustForAcl()\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjustForAcl()\n");
if ((BT_8821A_1ANT_WIFI_STATUS_NON_CONNECTED_ASSO_AUTH_SCAN ==
wifi_status) ||
@@ -1342,8 +1330,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
if (!coex_dm->auto_tdma_adjust) {
coex_dm->auto_tdma_adjust = true;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, true, 2);
coex_dm->tdma_adj_type = 2;
@@ -1378,9 +1366,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi duration!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
/* <=3 retry in the last 2-second duration*/
@@ -1410,9 +1397,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
/* retry count > 3, if retry count > 3 happens once,
@@ -1433,8 +1419,8 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
if (result == -1) {
@@ -1479,9 +1465,9 @@ static void btc8821a1ant_tdma_dur_adj(struct btc_coexist *btcoexist,
}
} else {
/*no change*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], ********** TDMA(on, %d) **********\n",
- coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], ********** TDMA(on, %d) **********\n",
+ coex_dm->cur_ps_tdma);
}
if (coex_dm->cur_ps_tdma != 1 &&
@@ -1603,27 +1589,27 @@ static void btc8821a1ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is enabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is disabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
halbtc8821a1ant_action_wifi_only(btcoexist);
}
}
if (pre_bt_disabled != bt_disabled) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
if (bt_disabled) {
btcoexist->btc_set(btcoexist, BTC_SET_ACT_LEAVE_LPS,
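
The enable/disable monitor in this hunk is a debounce: BT is declared disabled only after the activity counters read zero on two consecutive polls (bt_disable_cnt >= 2), and any nonzero poll re-arms the counter. A standalone model of just that logic:

        #include <stdbool.h>
        #include <stdio.h>

        static bool bt_disabled;
        static int bt_disable_cnt;

        static void mon_bt_en_dis(int bt_active_counters)
        {
                if (bt_active_counters) {
                        bt_disable_cnt = 0;     /* activity seen: re-arm */
                        bt_disabled = false;
                } else if (++bt_disable_cnt >= 2) {
                        bt_disabled = true;     /* two zero polls in a row */
                }
                printf("counters=%d -> bt %s\n", bt_active_counters,
                       bt_disabled ? "disabled" : "enabled");
        }

        int main(void)
        {
                mon_bt_en_dis(5);       /* enabled */
                mon_bt_en_dis(0);       /* still enabled: first zero poll */
                mon_bt_en_dis(0);       /* disabled: second zero poll */
                return 0;
        }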
@@ -1897,15 +1883,15 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
bool scan = false, link = false, roam = false;
bool under_4way = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect()===>\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_4_WAY_PROGRESS, &under_4way);
if (under_4way) {
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under 4way<===\n");
return;
}
@@ -1914,8 +1900,8 @@ static void halbtc8821a1ant_action_wifi_connected(struct btc_coexist *btcoexist)
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_ROAM, &roam);
if (scan || link || roam) {
halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], CoexForWifiConnect(), return for wifi is under scan<===\n");
return;
}
@@ -1976,58 +1962,58 @@ static void btc8821a1ant_run_sw_coex_mech(struct btc_coexist *btcoexist)
if (!halbtc8821a1ant_is_common_action(btcoexist)) {
switch (coex_dm->cur_algorithm) {
case BT_8821A_1ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = SCO.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = SCO\n");
halbtc8821a1ant_action_sco(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID\n");
halbtc8821a1ant_action_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = A2DP\n");
halbtc8821a1ant_action_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = A2DP+PAN(HS).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = A2DP+PAN(HS)\n");
halbtc8821a1ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN(EDR)\n");
halbtc8821a1ant_action_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HS mode.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HS mode\n");
halbtc8821a1ant_action_pan_hs(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN+A2DP\n");
halbtc8821a1ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = PAN(EDR)+HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = PAN(EDR)+HID\n");
halbtc8821a1ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP+PAN.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID+A2DP+PAN\n");
btc8821a1ant_action_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8821A_1ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = HID+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = HID+A2DP\n");
halbtc8821a1ant_action_hid_a2dp(btcoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action algorithm = coexist All Off!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action algorithm = coexist All Off!!\n");
/*halbtc8821a1ant_coex_all_off(btcoexist);*/
break;
}
@@ -2045,31 +2031,31 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u8 wifi_rssi_state = BTC_RSSI_STATE_HIGH;
bool wifi_under_5g = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism()===>\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism()===>\n");
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for Manual CTRL <===\n");
return;
}
if (btcoexist->stop_coex_dm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for Stop Coex DM <===\n");
return;
}
if (coex_sta->under_ips) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is under IPS !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is under IPS !!!\n");
return;
}
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), return for 5G <===\n");
halbtc8821a1ant_coex_under_5g(btcoexist);
return;
}
@@ -2135,8 +2121,8 @@ static void halbtc8821a1ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
if (!wifi_connected) {
bool scan = false, link = false, roam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], wifi is non connected-idle !!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], wifi is non connected-idle !!!\n");
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2168,8 +2154,8 @@ static void halbtc8821a1ant_init_hw_config(struct btc_coexist *btcoexist,
u8 u1_tmp = 0;
bool wifi_under_5g = false;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], 1Ant Init HW Config!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], 1Ant Init HW Config!!\n");
if (back_up) {
coex_dm->backup_arfr_cnt1 = btcoexist->btc_read_4byte(btcoexist,
@@ -2220,8 +2206,8 @@ void ex_halbtc8821a1ant_init_hwconfig(struct btc_coexist *btcoexist)
void ex_halbtc8821a1ant_init_coex_dm(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
btcoexist->stop_coex_dm = false;
@@ -2515,8 +2501,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8821a1ant_set_ant_path(btcoexist,
BTC_ANT_PATH_BT, false, true);
@@ -2525,8 +2511,8 @@ void ex_halbtc8821a1ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
halbtc8821a1ant_coex_table_with_type(btcoexist,
NORMAL_EXEC, 0);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
halbtc8821a1ant_run_coexist_mechanism(btcoexist);
@@ -2539,12 +2525,12 @@ void ex_halbtc8821a1ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
return;
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_Lps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_Lps = false;
}
}
@@ -2574,8 +2560,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_SCAN_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
if (!wifi_connected) {
/* non-connected scan*/
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
@@ -2584,8 +2570,8 @@ void ex_halbtc8821a1ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
halbtc8821a1ant_action_wifi_connected_scan(btcoexist);
}
} else if (BTC_SCAN_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
if (!wifi_connected) {
/* non-connected scan*/
halbtc8821a1ant_action_wifi_not_connected(btcoexist);
@@ -2614,12 +2600,12 @@ void ex_halbtc8821a1ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
}
if (BTC_ASSOCIATE_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
btc8821a1ant_act_wifi_not_conn_scan(btcoexist);
} else if (BTC_ASSOCIATE_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED, &wifi_connected);
@@ -2645,11 +2631,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
return;
if (BTC_MEDIA_CONNECT == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
}
/* only 2.4G we need to inform bt the chnl mask*/
@@ -2672,9 +2658,11 @@ void ex_halbtc8821a1ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
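
Note that the reflowed shift expression in the hunk above only changes the debug string: btc_fill_h2c() still receives the three raw channel-info bytes, and the shifts merely pack them into one value for printing. A tiny sketch of that packing, with example byte values that are purely illustrative:

/* Packing as in the btc_alg_dbg() line above: byte0 lands in bits
 * 23..16, byte1 in bits 15..8, byte2 in bits 7..0.  Values are examples.
 */
u8 h2c_parameter[3] = { 0x0,	/* e.g. not connected */
			1,	/* e.g. channel 1 */
			0x20 };	/* e.g. bandwidth flag */

u32 packed = h2c_parameter[0] << 16 |
	     h2c_parameter[1] << 8 |
	     h2c_parameter[2];
/* packed == 0x000120 for the example bytes above; only this debug
 * value is affected, the H2C payload itself is unchanged. */
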
@@ -2702,8 +2690,8 @@ void ex_halbtc8821a1ant_special_packet_notify(struct btc_coexist *btcoexist,
if (BTC_PACKET_DHCP == type ||
BTC_PACKET_EAPOL == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], special Packet(%d) notify\n", type);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], special Packet(%d) notify\n", type);
btc8821a1ant_act_wifi_conn_sp_pkt(btcoexist);
}
}
@@ -2727,19 +2715,19 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8821A_1ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length = %d, hex data = [",
- rsp_source, length);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length = %d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
}
}
@@ -2756,8 +2744,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
/* Here we need to resend some wifi info to BT*/
/* because bt is reset and loss of the info.*/
if (coex_sta->bt_info_ext & BIT1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit1 check, send wifi BW&Chnl to BT!!\n");
btcoexist->btc_get(btcoexist,
BTC_GET_BL_WIFI_CONNECTED,
&wifi_connected);
@@ -2773,8 +2761,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
if ((coex_sta->bt_info_ext & BIT3) && !wifi_under_5g) {
if (!btcoexist->manual_control &&
!btcoexist->stop_coex_dm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit3 check, set BT NOT to ignore Wlan active!!\n");
halbtc8821a1ant_ignore_wlan_act(btcoexist,
FORCE_EXEC,
false);
@@ -2782,8 +2770,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
}
#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
if (!(coex_sta->bt_info_ext & BIT4)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT ext info bit4 check, set BT to enable Auto Report!!\n");
halbtc8821a1ant_bt_auto_report(btcoexist,
FORCE_EXEC, true);
}
@@ -2828,28 +2816,28 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
if (!(bt_info&BT_INFO_8821A_1ANT_B_CONNECTION)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_NON_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Connected idle!!!\n");
} else if (bt_info == BT_INFO_8821A_1ANT_B_CONNECTION) {
/* connection exists but no busy*/
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_CONNECTED_IDLE;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Connected-idle!!!\n");
} else if ((bt_info&BT_INFO_8821A_1ANT_B_SCO_ESCO) ||
(bt_info&BT_INFO_8821A_1ANT_B_SCO_BUSY)) {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_SCO_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT SCO busy!!!\n");
} else if (bt_info&BT_INFO_8821A_1ANT_B_ACL_BUSY) {
if (BT_8821A_1ANT_BT_STATUS_ACL_BUSY != coex_dm->bt_status)
coex_dm->auto_tdma_adjust = false;
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_ACL_BUSY;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT ACL busy!!!\n");
} else {
coex_dm->bt_status = BT_8821A_1ANT_BT_STATUS_MAX;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BtInfoNotify(), BT Non-Defined state!!!\n");
}
if ((BT_8821A_1ANT_BT_STATUS_ACL_BUSY == coex_dm->bt_status) ||
@@ -2866,8 +2854,8 @@ void ex_halbtc8821a1ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Halt notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Halt notify\n");
btcoexist->stop_coex_dm = true;
@@ -2885,20 +2873,20 @@ void ex_halbtc8821a1ant_halt_notify(struct btc_coexist *btcoexist)
void ex_halbtc8821a1ant_pnp_notify(struct btc_coexist *btcoexist, u8 pnp_state)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Pnp notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Pnp notify\n");
if (BTC_WIFI_PNP_SLEEP == pnp_state) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Pnp notify to SLEEP\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Pnp notify to SLEEP\n");
btcoexist->stop_coex_dm = true;
halbtc8821a1ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
halbtc8821a1ant_power_save_state(btcoexist, BTC_PS_WIFI_NATIVE,
0x0, 0x0);
halbtc8821a1ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 9);
} else if (BTC_WIFI_PNP_WAKE_UP == pnp_state) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Pnp notify to WAKE UP\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Pnp notify to WAKE UP\n");
btcoexist->stop_coex_dm = false;
halbtc8821a1ant_init_hw_config(btcoexist, false);
halbtc8821a1ant_init_coex_dm(btcoexist);
@@ -2914,33 +2902,33 @@ ex_halbtc8821a1ant_periodical(
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], ==========================Periodical===========================\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8821a_1ant,
- glcoex_ver_8821a_1ant,
- fw_ver, bt_patch_ver,
- bt_patch_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8821a_1ant,
+ glcoex_ver_8821a_1ant,
+ fw_ver, bt_patch_ver,
+ bt_patch_ver);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
}
#if (BT_AUTO_REPORT_ONLY_8821A_1ANT == 0)
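
Every hunk in this file is the same mechanical substitution: BTC_PRINT(BTC_MSG_ALGORITHM, flag, fmt, ...) becomes btc_alg_dbg(flag, fmt, ...) and BTC_PRINT(BTC_MSG_INTERFACE, flag, fmt, ...) becomes btc_iface_dbg(flag, fmt, ...), folding the message-type argument into the macro name so each call site loses one argument and one line of indentation churn. A minimal sketch of how such wrappers could be defined; the real definitions live in the btcoexist headers, and the btc_dbg_type[] mask array, BTC_MSG_MAX, and the printk back end shown here are assumptions for illustration only:

/* Sketch only -- assumes a per-message-type debug mask and a plain
 * printk back end; the in-tree macros may differ in detail.
 */
extern u32 btc_dbg_type[BTC_MSG_MAX];		/* assumed global mask */

#define btc_alg_dbg(dbgflag, fmt, ...)					\
do {									\
	if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & (dbgflag)))	\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);			\
} while (0)

#define btc_iface_dbg(dbgflag, fmt, ...)				\
do {									\
	if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & (dbgflag)))	\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);			\
} while (0)

Because both are variadic macros guarded by the same kind of mask test as the old BTC_PRINT, the conversion below in halbtc8821a2ant.c is behavior-preserving; only the spelling of the call sites changes.
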
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
index 044d91429..81f843bba 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtc8821a2ant.c
@@ -80,28 +80,28 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT;
if (bt_rssi >= tmp) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else {
if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi thresh error!!\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi thresh error!!\n");
return coex_sta->pre_bt_rssi_state;
}
@@ -110,12 +110,12 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
if (bt_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Low\n");
}
} else if ((coex_sta->pre_bt_rssi_state ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -125,26 +125,26 @@ static u8 halbtc8821a2ant_bt_rssi_state(u8 level_num, u8 rssi_thresh,
(rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
bt_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to High\n");
} else if (bt_rssi < rssi_thresh) {
bt_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Low\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Low\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at Medium\n");
}
} else {
if (bt_rssi < rssi_thresh1) {
bt_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state switch to Medium\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state switch to Medium\n");
} else {
bt_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_RSSI_STATE,
- "[BTCoex], BT Rssi state stay at High\n");
+ btc_alg_dbg(ALGO_BT_RSSI_STATE,
+ "[BTCoex], BT Rssi state stay at High\n");
}
}
}
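
The hunks above rewrite the logging in halbtc8821a2ant_bt_rssi_state(), which is a two- or three-level comparator with hysteresis: a state only switches upward once the RSSI clears the threshold plus BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT, while switching downward needs the RSSI to drop below the bare threshold, so the state does not chatter when the signal hovers near a boundary. A stripped-down two-level version of that logic, as a sketch with an illustrative tolerance value:

enum rssi_state { RSSI_LOW, RSSI_HIGH };

#define RSSI_TOL 2	/* illustrative; the driver uses its own constant */

/* Two-level comparator with hysteresis, as in the hunks above:
 * LOW -> HIGH needs thresh + RSSI_TOL, HIGH -> LOW needs rssi < thresh.
 */
static enum rssi_state rssi_update(enum rssi_state prev, int rssi, int thresh)
{
	if (prev == RSSI_LOW)
		return (rssi >= thresh + RSSI_TOL) ? RSSI_HIGH : RSSI_LOW;
	return (rssi < thresh) ? RSSI_LOW : RSSI_HIGH;
}

The three-level variant in the driver applies the same rule twice, once per boundary, which is why each hunk carries a matched pair of "switch to" and "stay at" trace strings.
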
@@ -171,32 +171,28 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else {
if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
} else if (level_num == 3) {
if (rssi_thresh > rssi_thresh1) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI thresh error!!\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI thresh error!!\n");
return coex_sta->pre_wifi_rssi_state[index];
}
@@ -207,14 +203,12 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >=
(rssi_thresh+BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Low\n");
}
} else if ((coex_sta->pre_wifi_rssi_state[index] ==
BTC_RSSI_STATE_MEDIUM) ||
@@ -223,31 +217,26 @@ static u8 halbtc8821a2ant_wifi_rssi_state(struct btc_coexist *btcoexist,
if (wifi_rssi >= (rssi_thresh1 +
BTC_RSSI_COEX_THRESH_TOL_8821A_2ANT)) {
wifi_rssi_state = BTC_RSSI_STATE_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to High\n");
} else if (wifi_rssi < rssi_thresh) {
wifi_rssi_state = BTC_RSSI_STATE_LOW;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Low\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Low\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at Medium\n");
}
} else {
if (wifi_rssi < rssi_thresh1) {
wifi_rssi_state = BTC_RSSI_STATE_MEDIUM;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state switch to Medium\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state switch to Medium\n");
} else {
wifi_rssi_state = BTC_RSSI_STATE_STAY_HIGH;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_WIFI_RSSI_STATE,
- "[BTCoex], wifi RSSI state stay at High\n");
+ btc_alg_dbg(ALGO_WIFI_RSSI_STATE,
+ "[BTCoex], wifi RSSI state stay at High\n");
}
}
}
@@ -279,26 +268,26 @@ static void btc8821a2ant_mon_bt_en_dis(struct btc_coexist *btcoexist)
bt_disabled = false;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is enabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is enabled !!\n");
} else {
bt_disable_cnt++;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], bt all counters = 0, %d times!!\n",
- bt_disable_cnt);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], bt all counters = 0, %d times!!\n",
+ bt_disable_cnt);
if (bt_disable_cnt >= 2) {
bt_disabled = true;
btcoexist->btc_set(btcoexist, BTC_SET_BL_BT_DISABLE,
&bt_disabled);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is disabled !!\n");
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is disabled !!\n");
}
}
if (pre_bt_disabled != bt_disabled) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], BT is from %s to %s!!\n",
- (pre_bt_disabled ? "disabled" : "enabled"),
- (bt_disabled ? "disabled" : "enabled"));
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], BT is from %s to %s!!\n",
+ (pre_bt_disabled ? "disabled" : "enabled"),
+ (bt_disabled ? "disabled" : "enabled"));
pre_bt_disabled = bt_disabled;
}
}
@@ -324,12 +313,12 @@ static void halbtc8821a2ant_monitor_bt_ctr(struct btc_coexist *btcoexist)
coex_sta->low_priority_tx = reg_lp_tx;
coex_sta->low_priority_rx = reg_lp_rx;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_BT_MONITOR,
- "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
- reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], High Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_hp_txrx, reg_hp_tx, reg_hp_tx, reg_hp_rx, reg_hp_rx);
+ btc_alg_dbg(ALGO_BT_MONITOR,
+ "[BTCoex], Low Priority Tx/Rx (reg 0x%x) = 0x%x(%d)/0x%x(%d)\n",
+ reg_lp_txrx, reg_lp_tx, reg_lp_tx, reg_lp_rx, reg_lp_rx);
/* reset counter */
btcoexist->btc_write_1byte(btcoexist, 0x76e, 0xc);
@@ -343,9 +332,9 @@ static void halbtc8821a2ant_query_bt_info(struct btc_coexist *btcoexist)
h2c_parameter[0] |= BIT0; /* trigger */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Query Bt Info, FW write 0x61 = 0x%x\n",
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x61, 1, h2c_parameter);
}
@@ -368,8 +357,8 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
stack_info->bt_link_exist = coex_sta->bt_link_exist;
if (!coex_sta->bt_link_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], No profile exists!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], No profile exists!!!\n");
return algorithm;
}
@@ -384,26 +373,26 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
if (num_of_diff_profile == 1) {
if (coex_sta->sco_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
if (coex_sta->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else if (coex_sta->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2DP only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2DP only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP;
} else if (coex_sta->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], PAN(HS) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], PAN(HS) only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], PAN(EDR) only\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], PAN(EDR) only\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR;
}
}
@@ -411,50 +400,50 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
} else if (num_of_diff_profile == 2) {
if (coex_sta->sco_exist) {
if (coex_sta->hid_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else if (coex_sta->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP ==> SCO\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP ==> SCO\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else if (coex_sta->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_SCO;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
}
} else {
if (coex_sta->hid_exist &&
coex_sta->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else if (coex_sta->hid_exist &&
coex_sta->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], A2DP + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP;
}
}
@@ -463,29 +452,29 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
if (coex_sta->sco_exist) {
if (coex_sta->hid_exist &&
coex_sta->a2dp_exist) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP ==> HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else if (coex_sta->hid_exist &&
coex_sta->pan_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
} else if (coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + A2DP + PAN(EDR) ==> HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
}
@@ -494,12 +483,12 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP + PAN(HS)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], HID + A2DP + PAN(EDR)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], HID + A2DP + PAN(EDR)\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR;
}
}
@@ -510,12 +499,12 @@ static u8 halbtc8821a2ant_action_algorithm(struct btc_coexist *btcoexist)
coex_sta->pan_exist &&
coex_sta->a2dp_exist) {
if (bt_hs_on) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Error!!! SCO + HID + A2DP + PAN(HS)\n");
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], SCO + HID + A2DP + PAN(EDR)==>PAN(EDR)+HID\n");
algorithm = BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
}
}
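
halbtc8821a2ant_action_algorithm(), rewritten across the hunks above, is a hand-rolled decision tree over the four profile flags (SCO/HID/A2DP/PAN) plus the hotspot bit. The same mapping can be pictured as a lookup keyed on a profile bitmask; the sketch below is not the driver's code, just a compact restatement of a few of its single- and dual-profile branches, with the bit layout and helper name invented for illustration:

/* Sketch: part of the decision tree above restated as a bitmask lookup.
 * Bit layout and helper are illustrative; combinations not listed are
 * omitted here, not absent from the driver.
 */
#define PROF_SCO  BIT(0)
#define PROF_HID  BIT(1)
#define PROF_A2DP BIT(2)
#define PROF_PAN  BIT(3)

static u8 algo_for_profiles(u8 mask, bool bt_hs_on)
{
	switch (mask) {
	case PROF_SCO:
		return BT_8821A_2ANT_COEX_ALGO_SCO;
	case PROF_HID:
		return BT_8821A_2ANT_COEX_ALGO_HID;
	case PROF_A2DP:
		return BT_8821A_2ANT_COEX_ALGO_A2DP;
	case PROF_PAN:
		return bt_hs_on ? BT_8821A_2ANT_COEX_ALGO_PANHS
				: BT_8821A_2ANT_COEX_ALGO_PANEDR;
	case PROF_HID | PROF_A2DP:
		return BT_8821A_2ANT_COEX_ALGO_HID_A2DP;
	default:	/* remaining combinations elided in this sketch */
		return BT_8821A_2ANT_COEX_ALGO_PANEDR_HID;
	}
}
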
@@ -544,15 +533,15 @@ static bool halbtc8821a2ant_need_to_dec_bt_pwr(struct btc_coexist *btcoexist)
if (wifi_connected) {
if (bt_hs_on) {
if (bt_hs_rssi > 37) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt power for HS mode!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt power for HS mode!!\n");
ret = true;
}
} else {
if ((bt_rssi_state == BTC_RSSI_STATE_HIGH) ||
(bt_rssi_state == BTC_RSSI_STATE_STAY_HIGH)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], Need to decrease bt power for Wifi is connected!!\n");
ret = true;
}
}
@@ -570,10 +559,10 @@ static void btc8821a2ant_set_fw_dac_swing_lev(struct btc_coexist *btcoexist,
*/
h2c_parameter[0] = dac_swing_lvl;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], Set Dac Swing Level = 0x%x\n", dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x64 = 0x%x\n", h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x64, 1, h2c_parameter);
}
@@ -588,9 +577,9 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
if (dec_bt_pwr)
h2c_parameter[0] |= BIT1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
- (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], decrease Bt Power : %s, FW write 0x62 = 0x%x\n",
+ (dec_bt_pwr ? "Yes!!" : "No!!"), h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x62, 1, h2c_parameter);
}
@@ -598,16 +587,16 @@ static void halbtc8821a2ant_set_fw_dec_bt_pwr(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_dec_bt_pwr(struct btc_coexist *btcoexist,
bool force_exec, bool dec_bt_pwr)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s Dec BT power = %s\n",
- (force_exec ? "force to" : ""),
- ((dec_bt_pwr) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s Dec BT power = %s\n",
+ (force_exec ? "force to" : ""),
+ ((dec_bt_pwr) ? "ON" : "OFF"));
coex_dm->cur_dec_bt_pwr = dec_bt_pwr;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
- coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_dec_bt_pwr = %d, cur_dec_bt_pwr = %d\n",
+ coex_dm->pre_dec_bt_pwr, coex_dm->cur_dec_bt_pwr);
if (coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
return;
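
halbtc8821a2ant_dec_bt_pwr() above shows the pattern that every setter in this file follows and that the bulk of the remaining hunks relog: record the requested value in a cur_* field, and unless force_exec is set, compare it against the cached pre_* value and return early when nothing changed, so the H2C command only goes to firmware on an actual transition. A skeleton of that caching idiom, as a sketch whose names mirror the driver:

/* Skeleton of the pre_/cur_ caching used by the setters above. */
static void dec_bt_pwr_cached(struct btc_coexist *btcoexist,
			      bool force_exec, bool dec_bt_pwr)
{
	coex_dm->cur_dec_bt_pwr = dec_bt_pwr;

	if (!force_exec &&
	    coex_dm->pre_dec_bt_pwr == coex_dm->cur_dec_bt_pwr)
		return;				/* no change: skip the H2C */

	halbtc8821a2ant_set_fw_dec_bt_pwr(btcoexist,
					  coex_dm->cur_dec_bt_pwr);
	coex_dm->pre_dec_bt_pwr = coex_dm->cur_dec_bt_pwr;
}

The FORCE_EXEC/NORMAL_EXEC constants seen at the call sites simply select whether this early-return path is allowed.
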
@@ -627,10 +616,10 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
if (bt_lna_cons_on)
h2c_parameter[1] |= BIT0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
- (bt_lna_cons_on ? "ON!!" : "OFF!!"),
- h2c_parameter[0]<<8|h2c_parameter[1]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set BT LNA Constrain: %s, FW write 0x69 = 0x%x\n",
+ bt_lna_cons_on ? "ON!!" : "OFF!!",
+ h2c_parameter[0] << 8 | h2c_parameter[1]);
btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
}
@@ -638,17 +627,17 @@ static void btc8821a2ant_set_fw_bt_lna_constr(struct btc_coexist *btcoexist,
static void btc8821a2_set_bt_lna_const(struct btc_coexist *btcoexist,
bool force_exec, bool bt_lna_cons_on)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s BT Constrain = %s\n",
- (force_exec ? "force" : ""),
- ((bt_lna_cons_on) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s BT Constrain = %s\n",
+ (force_exec ? "force" : ""),
+ ((bt_lna_cons_on) ? "ON" : "OFF"));
coex_dm->cur_bt_lna_constrain = bt_lna_cons_on;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
- coex_dm->pre_bt_lna_constrain,
- coex_dm->cur_bt_lna_constrain);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_bt_lna_constrain = %d,cur_bt_lna_constrain = %d\n",
+ coex_dm->pre_bt_lna_constrain,
+ coex_dm->cur_bt_lna_constrain);
if (coex_dm->pre_bt_lna_constrain ==
coex_dm->cur_bt_lna_constrain)
@@ -669,10 +658,10 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
h2c_parameter[1] = bt_psd_mode;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
- h2c_parameter[1],
- h2c_parameter[0]<<8|h2c_parameter[1]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set BT PSD mode = 0x%x, FW write 0x69 = 0x%x\n",
+ h2c_parameter[1],
+ h2c_parameter[0] << 8 | h2c_parameter[1]);
btcoexist->btc_fill_h2c(btcoexist, 0x69, 2, h2c_parameter);
}
@@ -680,15 +669,15 @@ static void halbtc8821a2ant_set_fw_bt_psd_mode(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_set_bt_psd_mode(struct btc_coexist *btcoexist,
bool force_exec, u8 bt_psd_mode)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s BT PSD mode = 0x%x\n",
- (force_exec ? "force" : ""), bt_psd_mode);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s BT PSD mode = 0x%x\n",
+ (force_exec ? "force" : ""), bt_psd_mode);
coex_dm->cur_bt_psd_mode = bt_psd_mode;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
- coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_bt_psd_mode = 0x%x, cur_bt_psd_mode = 0x%x\n",
+ coex_dm->pre_bt_psd_mode, coex_dm->cur_bt_psd_mode);
if (coex_dm->pre_bt_psd_mode == coex_dm->cur_bt_psd_mode)
return;
@@ -709,10 +698,10 @@ static void halbtc8821a2ant_set_bt_auto_report(struct btc_coexist *btcoexist,
if (enable_auto_report)
h2c_parameter[0] |= BIT0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
- (enable_auto_report ? "Enabled!!" : "Disabled!!"),
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], BT FW auto report : %s, FW write 0x68 = 0x%x\n",
+ (enable_auto_report ? "Enabled!!" : "Disabled!!"),
+ h2c_parameter[0]);
btcoexist->btc_fill_h2c(btcoexist, 0x68, 1, h2c_parameter);
}
@@ -721,17 +710,17 @@ static void halbtc8821a2ant_bt_auto_report(struct btc_coexist *btcoexist,
bool force_exec,
bool enable_auto_report)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s BT Auto report = %s\n",
- (force_exec ? "force to" : ""),
- ((enable_auto_report) ? "Enabled" : "Disabled"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s BT Auto report = %s\n",
+ (force_exec ? "force to" : ""),
+ ((enable_auto_report) ? "Enabled" : "Disabled"));
coex_dm->cur_bt_auto_report = enable_auto_report;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
- coex_dm->pre_bt_auto_report,
- coex_dm->cur_bt_auto_report);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_bt_auto_report = %d, cur_bt_auto_report = %d\n",
+ coex_dm->pre_bt_auto_report,
+ coex_dm->cur_bt_auto_report);
if (coex_dm->pre_bt_auto_report == coex_dm->cur_bt_auto_report)
return;
@@ -746,16 +735,16 @@ static void halbtc8821a2ant_fw_dac_swing_lvl(struct btc_coexist *btcoexist,
bool force_exec,
u8 fw_dac_swing_lvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s set FW Dac Swing level = %d\n",
- (force_exec ? "force to" : ""), fw_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s set FW Dac Swing level = %d\n",
+ (force_exec ? "force to" : ""), fw_dac_swing_lvl);
coex_dm->cur_fw_dac_swing_lvl = fw_dac_swing_lvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
- coex_dm->pre_fw_dac_swing_lvl,
- coex_dm->cur_fw_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_fw_dac_swing_lvl = %d, cur_fw_dac_swing_lvl = %d\n",
+ coex_dm->pre_fw_dac_swing_lvl,
+ coex_dm->cur_fw_dac_swing_lvl);
if (coex_dm->pre_fw_dac_swing_lvl ==
coex_dm->cur_fw_dac_swing_lvl)
@@ -773,8 +762,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
{
if (rx_rf_shrink_on) {
/* Shrink RF Rx LPF corner */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Shrink RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Shrink RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A, 0x1e,
0xfffff, 0xffffc);
} else {
@@ -782,8 +771,8 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
* After initialized, we can use coex_dm->bt_rf0x1e_backup
*/
if (btcoexist->initilized) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Resume RF Rx LPF corner!!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Resume RF Rx LPF corner!!\n");
btcoexist->btc_set_rf_reg(btcoexist, BTC_RF_A,
0x1e, 0xfffff,
coex_dm->bt_rf0x1e_backup);
@@ -794,17 +783,17 @@ static void btc8821a2ant_set_sw_rf_rx_lpf_corner(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_RfShrink(struct btc_coexist *btcoexist,
bool force_exec, bool rx_rf_shrink_on)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn Rx RF Shrink = %s\n",
- (force_exec ? "force to" : ""),
- ((rx_rf_shrink_on) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn Rx RF Shrink = %s\n",
+ (force_exec ? "force to" : ""),
+ ((rx_rf_shrink_on) ? "ON" : "OFF"));
coex_dm->cur_rf_rx_lpf_shrink = rx_rf_shrink_on;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
- coex_dm->pre_rf_rx_lpf_shrink,
- coex_dm->cur_rf_rx_lpf_shrink);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], pre_rf_rx_lpf_shrink = %d, cur_rf_rx_lpf_shrink = %d\n",
+ coex_dm->pre_rf_rx_lpf_shrink,
+ coex_dm->cur_rf_rx_lpf_shrink);
if (coex_dm->pre_rf_rx_lpf_shrink ==
coex_dm->cur_rf_rx_lpf_shrink)
@@ -835,9 +824,9 @@ static void btc8821a2ant_SetSwPenTxRateAdapt(struct btc_coexist *btcoexist,
h2c_parameter[5] = 0xf9;
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set WiFi Low-Penalty Retry: %s",
- (low_penalty_ra ? "ON!!" : "OFF!!"));
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set WiFi Low-Penalty Retry: %s",
+ (low_penalty_ra ? "ON!!" : "OFF!!"));
btcoexist->btc_fill_h2c(btcoexist, 0x69, 6, h2c_parameter);
}
@@ -846,17 +835,17 @@ static void halbtc8821a2ant_low_penalty_ra(struct btc_coexist *btcoexist,
bool force_exec, bool low_penalty_ra)
{
/*return;*/
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn LowPenaltyRA = %s\n",
- (force_exec ? "force to" : ""),
- ((low_penalty_ra) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn LowPenaltyRA = %s\n",
+ (force_exec ? "force to" : ""),
+ ((low_penalty_ra) ? "ON" : "OFF"));
coex_dm->cur_low_penalty_ra = low_penalty_ra;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
- coex_dm->pre_low_penalty_ra,
- coex_dm->cur_low_penalty_ra);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], pre_low_penalty_ra = %d, cur_low_penalty_ra = %d\n",
+ coex_dm->pre_low_penalty_ra,
+ coex_dm->cur_low_penalty_ra);
if (coex_dm->pre_low_penalty_ra == coex_dm->cur_low_penalty_ra)
return;
@@ -872,8 +861,8 @@ static void halbtc8821a2ant_set_dac_swing_reg(struct btc_coexist *btcoexist,
{
u8 val = (u8)level;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], Write SwDacSwing = 0x%x\n", level);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], Write SwDacSwing = 0x%x\n", level);
btcoexist->btc_write_1byte_bitmask(btcoexist, 0xc5b, 0x3e, val);
}
@@ -891,21 +880,21 @@ static void halbtc8821a2ant_dac_swing(struct btc_coexist *btcoexist,
bool force_exec, bool dac_swing_on,
u32 dac_swing_lvl)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
- (force_exec ? "force to" : ""),
- ((dac_swing_on) ? "ON" : "OFF"),
- dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn DacSwing = %s, dac_swing_lvl = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ ((dac_swing_on) ? "ON" : "OFF"),
+ dac_swing_lvl);
coex_dm->cur_dac_swing_on = dac_swing_on;
coex_dm->cur_dac_swing_lvl = dac_swing_lvl;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
- coex_dm->pre_dac_swing_on,
- coex_dm->pre_dac_swing_lvl,
- coex_dm->cur_dac_swing_on,
- coex_dm->cur_dac_swing_lvl);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], pre_dac_swing_on = %d, pre_dac_swing_lvl = 0x%x, cur_dac_swing_on = %d, cur_dac_swing_lvl = 0x%x\n",
+ coex_dm->pre_dac_swing_on,
+ coex_dm->pre_dac_swing_lvl,
+ coex_dm->cur_dac_swing_on,
+ coex_dm->cur_dac_swing_lvl);
if ((coex_dm->pre_dac_swing_on == coex_dm->cur_dac_swing_on) &&
(coex_dm->pre_dac_swing_lvl ==
@@ -924,12 +913,12 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
bool adc_back_off)
{
if (adc_back_off) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB BackOff Level On!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level On!\n");
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x3);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], BB BackOff Level Off!\n");
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], BB BackOff Level Off!\n");
btcoexist->btc_write_1byte_bitmask(btcoexist, 0x8db, 0x60, 0x1);
}
}
@@ -937,16 +926,17 @@ static void halbtc8821a2ant_set_adc_back_off(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_adc_back_off(struct btc_coexist *btcoexist,
bool force_exec, bool adc_back_off)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s turn AdcBackOff = %s\n",
- (force_exec ? "force to" : ""),
- ((adc_back_off) ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s turn AdcBackOff = %s\n",
+ (force_exec ? "force to" : ""),
+ ((adc_back_off) ? "ON" : "OFF"));
coex_dm->cur_adc_back_off = adc_back_off;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
- coex_dm->pre_adc_back_off, coex_dm->cur_adc_back_off);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], pre_adc_back_off = %d, cur_adc_back_off = %d\n",
+ coex_dm->pre_adc_back_off,
+ coex_dm->cur_adc_back_off);
if (coex_dm->pre_adc_back_off == coex_dm->cur_adc_back_off)
return;
@@ -960,20 +950,20 @@ static void halbtc8821a2ant_set_coex_table(struct btc_coexist *btcoexist,
u32 val0x6c0, u32 val0x6c4,
u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c0 = 0x%x\n", val0x6c0);
btcoexist->btc_write_4byte(btcoexist, 0x6c0, val0x6c0);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c4 = 0x%x\n", val0x6c4);
btcoexist->btc_write_4byte(btcoexist, 0x6c4, val0x6c4);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6c8 = 0x%x\n", val0x6c8);
btcoexist->btc_write_4byte(btcoexist, 0x6c8, val0x6c8);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_EXEC,
- "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_EXEC,
+ "[BTCoex], set coex table, set 0x6cc = 0x%x\n", val0x6cc);
btcoexist->btc_write_1byte(btcoexist, 0x6cc, val0x6cc);
}
@@ -981,28 +971,28 @@ static void halbtc8821a2ant_coex_table(struct btc_coexist *btcoexist,
bool force_exec, u32 val0x6c0,
u32 val0x6c4, u32 val0x6c8, u8 val0x6cc)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW,
- "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
- (force_exec ? "force to" : ""),
- val0x6c0, val0x6c4, val0x6c8, val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW,
+ "[BTCoex], %s write Coex Table 0x6c0 = 0x%x, 0x6c4 = 0x%x, 0x6c8 = 0x%x, 0x6cc = 0x%x\n",
+ (force_exec ? "force to" : ""),
+ val0x6c0, val0x6c4, val0x6c8, val0x6cc);
coex_dm->cur_val0x6c0 = val0x6c0;
coex_dm->cur_val0x6c4 = val0x6c4;
coex_dm->cur_val0x6c8 = val0x6c8;
coex_dm->cur_val0x6cc = val0x6cc;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
- coex_dm->pre_val0x6c0,
- coex_dm->pre_val0x6c4,
- coex_dm->pre_val0x6c8,
- coex_dm->pre_val0x6cc);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_SW_DETAIL,
- "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
- coex_dm->cur_val0x6c0,
- coex_dm->cur_val0x6c4,
- coex_dm->cur_val0x6c8,
- coex_dm->cur_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], pre_val0x6c0 = 0x%x, pre_val0x6c4 = 0x%x, pre_val0x6c8 = 0x%x, pre_val0x6cc = 0x%x !!\n",
+ coex_dm->pre_val0x6c0,
+ coex_dm->pre_val0x6c4,
+ coex_dm->pre_val0x6c8,
+ coex_dm->pre_val0x6cc);
+ btc_alg_dbg(ALGO_TRACE_SW_DETAIL,
+ "[BTCoex], cur_val0x6c0 = 0x%x, cur_val0x6c4 = 0x%x, cur_val0x6c8 = 0x%x, cur_val0x6cc = 0x%x !!\n",
+ coex_dm->cur_val0x6c0,
+ coex_dm->cur_val0x6c4,
+ coex_dm->cur_val0x6c8,
+ coex_dm->cur_val0x6cc);
if ((coex_dm->pre_val0x6c0 == coex_dm->cur_val0x6c0) &&
(coex_dm->pre_val0x6c4 == coex_dm->cur_val0x6c4) &&
@@ -1027,9 +1017,9 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
if (enable)
h2c_parameter[0] |= BIT0;/* function enable */
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
- h2c_parameter[0]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], set FW for BT Ignore Wlan_Act, FW write 0x63 = 0x%x\n",
+ h2c_parameter[0]);
btcoex->btc_fill_h2c(btcoex, 0x63, 1, h2c_parameter);
}
@@ -1037,16 +1027,16 @@ static void halbtc8821a2ant_set_fw_ignore_wlan_act(struct btc_coexist *btcoex,
static void halbtc8821a2ant_ignore_wlan_act(struct btc_coexist *btcoexist,
bool force_exec, bool enable)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn Ignore WlanAct %s\n",
- (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn Ignore WlanAct %s\n",
+ (force_exec ? "force to" : ""), (enable ? "ON" : "OFF"));
coex_dm->cur_ignore_wlan_act = enable;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
- coex_dm->pre_ignore_wlan_act,
- coex_dm->cur_ignore_wlan_act);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_ignore_wlan_act = %d, cur_ignore_wlan_act = %d!!\n",
+ coex_dm->pre_ignore_wlan_act,
+ coex_dm->cur_ignore_wlan_act);
if (coex_dm->pre_ignore_wlan_act ==
coex_dm->cur_ignore_wlan_act)
@@ -1075,13 +1065,13 @@ static void halbtc8821a2ant_set_fw_pstdma(struct btc_coexist *btcoexist,
coex_dm->ps_tdma_para[3] = byte4;
coex_dm->ps_tdma_para[4] = byte5;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
- h2c_parameter[0],
- h2c_parameter[1]<<24|
- h2c_parameter[2]<<16|
- h2c_parameter[3]<<8|
- h2c_parameter[4]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x60(5bytes) = 0x%x%08x\n",
+ h2c_parameter[0],
+ h2c_parameter[1] << 24 |
+ h2c_parameter[2] << 16 |
+ h2c_parameter[3] << 8 |
+ h2c_parameter[4]);
btcoexist->btc_fill_h2c(btcoexist, 0x60, 5, h2c_parameter);
}
@@ -1175,20 +1165,20 @@ static void halbtc8821a2ant_set_ant_path(struct btc_coexist *btcoexist,
static void halbtc8821a2ant_ps_tdma(struct btc_coexist *btcoexist,
bool force_exec, bool turn_on, u8 type)
{
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], %s turn %s PS TDMA, type = %d\n",
- (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
- type);
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], %s turn %s PS TDMA, type = %d\n",
+ (force_exec ? "force to" : ""), (turn_on ? "ON" : "OFF"),
+ type);
coex_dm->cur_ps_tdma_on = turn_on;
coex_dm->cur_ps_tdma = type;
if (!force_exec) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
- coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
- coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_ps_tdma_on = %d, cur_ps_tdma_on = %d!!\n",
+ coex_dm->pre_ps_tdma_on, coex_dm->cur_ps_tdma_on);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], pre_ps_tdma = %d, cur_ps_tdma = %d!!\n",
+ coex_dm->pre_ps_tdma, coex_dm->cur_ps_tdma);
if ((coex_dm->pre_ps_tdma_on == coex_dm->cur_ps_tdma_on) &&
(coex_dm->pre_ps_tdma == coex_dm->cur_ps_tdma))
@@ -1374,8 +1364,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi IPS + BT IPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi IPS + BT IPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1392,13 +1382,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Busy + BT IPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Busy + BT IPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi LPS + BT IPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi LPS + BT IPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
}
@@ -1416,8 +1406,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist, BTC_SET_ACT_DISABLE_LOW_POWER,
&low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi IPS + BT LPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi IPS + BT LPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1433,13 +1423,13 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Busy + BT LPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Busy + BT LPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi LPS + BT LPS!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi LPS + BT LPS!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
false, 1);
}
@@ -1458,8 +1448,8 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
btcoexist->btc_set(btcoexist,
BTC_SET_ACT_DISABLE_LOW_POWER, &low_pwr_disable);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi IPS + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi IPS + BT Busy!!\n");
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, false, 1);
halbtc8821a2ant_fw_dac_swing_lvl(btcoexist, NORMAL_EXEC, 6);
@@ -1478,12 +1468,12 @@ static bool halbtc8821a2ant_is_common_action(struct btc_coexist *btcoexist)
&low_pwr_disable);
if (wifi_busy) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi Busy + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi Busy + BT Busy!!\n");
common = false;
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Wifi LPS + BT Busy!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Wifi LPS + BT Busy!!\n");
halbtc8821a2ant_ps_tdma(btcoexist,
NORMAL_EXEC, true, 21);
@@ -1505,8 +1495,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 71) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
@@ -1601,8 +1591,8 @@ static void btc8821a2_int1(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 71);
@@ -1706,8 +1696,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 6);
@@ -1796,8 +1786,8 @@ static void btc8821a2_int2(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 2);
@@ -1892,8 +1882,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
int result)
{
if (tx_pause) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 1\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 1\n");
if (coex_dm->cur_ps_tdma == 1) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 7);
@@ -1982,8 +1972,8 @@ static void btc8821a2_int3(struct btc_coexist *btcoexist, bool tx_pause,
}
}
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], TxPause = 0\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], TxPause = 0\n");
if (coex_dm->cur_ps_tdma == 5) {
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC,
true, 3);
@@ -2085,13 +2075,13 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
int result;
u8 retry_count = 0;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW,
- "[BTCoex], TdmaDurationAdjust()\n");
+ btc_alg_dbg(ALGO_TRACE_FW,
+ "[BTCoex], TdmaDurationAdjust()\n");
if (coex_dm->reset_tdma_adjust) {
coex_dm->reset_tdma_adjust = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], first run TdmaDurationAdjust()!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], first run TdmaDurationAdjust()!!\n");
if (sco_hid) {
if (tx_pause) {
if (max_interval == 1) {
@@ -2195,11 +2185,11 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
} else {
		/* acquire the BT TRx retry count from BT_Info byte2 */
retry_count = coex_sta->bt_retry_cnt;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], retry_count = %d\n", retry_count);
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
- (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], retry_count = %d\n", retry_count);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], up = %d, dn = %d, m = %d, n = %d, wait_count = %d\n",
+ (int)up, (int)dn, (int)m, (int)n, (int)wait_count);
result = 0;
wait_count++;
@@ -2220,9 +2210,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
up = 0;
dn = 0;
result = 1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Increase wifi duration!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Increase wifi duration!!\n");
}
} else if (retry_count <= 3) {
/* <=3 retry in the last 2-second duration */
@@ -2251,9 +2240,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM,
- ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter<3!!\n");
}
} else {
/* retry count > 3, if retry count > 3 happens once,
@@ -2274,12 +2262,12 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
dn = 0;
wait_count = 0;
result = -1;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], Decrease wifi duration for retryCounter>3!!\n");
}
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], max Interval = %d\n", max_interval);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+ "[BTCoex], max Interval = %d\n", max_interval);
if (max_interval == 1)
btc8821a2_int1(btcoexist, tx_pause, result);
else if (max_interval == 2)
@@ -2295,9 +2283,9 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
if (coex_dm->cur_ps_tdma != coex_dm->tdma_adj_type) {
bool scan = false, link = false, roam = false;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], PsTdma type dismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
- coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+			    "[BTCoex], PsTdma type mismatch!!!, cur_ps_tdma = %d, recordPsTdma = %d\n",
+ coex_dm->cur_ps_tdma, coex_dm->tdma_adj_type);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_SCAN, &scan);
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_LINK, &link);
@@ -2307,8 +2295,8 @@ static void btc8821a2ant_tdma_dur_adj(struct btc_coexist *btcoexist,
halbtc8821a2ant_ps_tdma(btcoexist, NORMAL_EXEC, true,
coex_dm->tdma_adj_type);
} else {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_DETAIL,
- "[BTCoex], roaming/link/scan is under progress, will adjust next time!!!\n");
+ btc_alg_dbg(ALGO_TRACE_FW_DETAIL,
+				    "[BTCoex], roaming/link/scan is in progress, will adjust next time!!!\n");
}
}
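
The surrounding function is a small hysteresis controller: quiet polls (zero BT retries) accumulate in up until the WiFi slot is widened, noisy polls charge dn toward shrinking it, and wait_count throttles how often a decision is taken. A deliberately simplified model of that loop; the real code additionally scales its thresholds with m and n and treats 1-3 retries more gently than more than 3:

/* Simplified model of the up/dn hysteresis; hypothetical helper, not
 * the driver function.  Returns +1 to widen the WiFi slot, -1 to
 * shrink it, 0 to hold. */
static int tdma_decide(unsigned int retry_count, int *up, int *dn,
		       int *wait_count)
{
	(*wait_count)++;
	if (retry_count == 0) {		/* quiet poll: build confidence */
		(*up)++;
		if (*dn > 0)
			(*dn)--;
		if (*up >= 3) {		/* several quiet polls in a row */
			*up = *dn = *wait_count = 0;
			return 1;	/* "Increase wifi duration!!" */
		}
	} else {			/* BT retries seen: back off */
		*up = 0;
		(*dn)++;
		if (retry_count > 3 || *dn >= 2) {
			*dn = 0;
			*wait_count = 0;
			return -1;	/* "Decrease wifi duration" */
		}
	}
	return 0;
}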
@@ -3183,8 +3171,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
u8 algorithm = 0;
if (btcoexist->manual_control) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Manual control!!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Manual control!!!\n");
return;
}
@@ -3192,8 +3180,8 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
BTC_GET_BL_WIFI_UNDER_5G, &wifi_under_5g);
if (wifi_under_5g) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], RunCoexistMechanism(), run 5G coex setting!!<===\n");
halbtc8821a2ant_coex_under_5g(btcoexist);
return;
}
@@ -3201,81 +3189,82 @@ static void halbtc8821a2ant_run_coexist_mechanism(struct btc_coexist *btcoexist)
algorithm = halbtc8821a2ant_action_algorithm(btcoexist);
if (coex_sta->c2h_bt_inquiry_page &&
(BT_8821A_2ANT_COEX_ALGO_PANHS != algorithm)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], BT is under inquiry/page scan !!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], BT is under inquiry/page scan !!\n");
halbtc8821a2ant_bt_inquiry_page(btcoexist);
return;
}
coex_dm->cur_algorithm = algorithm;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Algorithm = %d\n", coex_dm->cur_algorithm);
if (halbtc8821a2ant_is_common_action(btcoexist)) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant common.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant common\n");
coex_dm->reset_tdma_adjust = true;
} else {
if (coex_dm->cur_algorithm != coex_dm->pre_algorithm) {
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
- coex_dm->pre_algorithm, coex_dm->cur_algorithm);
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], pre_algorithm = %d, cur_algorithm = %d\n",
+ coex_dm->pre_algorithm,
+ coex_dm->cur_algorithm);
coex_dm->reset_tdma_adjust = true;
}
switch (coex_dm->cur_algorithm) {
case BT_8821A_2ANT_COEX_ALGO_SCO:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = SCO.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = SCO\n");
halbtc8821a2ant_action_sco(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID\n");
halbtc8821a2ant_action_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP\n");
halbtc8821a2ant_action_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_A2DP_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = A2DP+PAN(HS)\n");
halbtc8821a2ant_action_a2dp_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR).\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)\n");
halbtc8821a2ant_action_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANHS:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HS mode.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HS mode\n");
halbtc8821a2ant_action_pan_hs(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN+A2DP\n");
halbtc8821a2ant_action_pan_edr_a2dp(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_PANEDR_HID:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = PAN(EDR)+HID\n");
halbtc8821a2ant_action_pan_edr_hid(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP_PANEDR:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP+PAN\n");
btc8821a2ant_act_hid_a2dp_pan_edr(btcoexist);
break;
case BT_8821A_2ANT_COEX_ALGO_HID_A2DP:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = HID+A2DP.\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = HID+A2DP\n");
halbtc8821a2ant_action_hid_a2dp(btcoexist);
break;
default:
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], Action 2-Ant, algorithm = coexist All Off!!\n");
halbtc8821a2ant_coex_all_off(btcoexist);
break;
}
@@ -3294,8 +3283,8 @@ void ex_halbtc8821a2ant_init_hwconfig(struct btc_coexist *btcoexist)
{
u8 u1tmp = 0;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], 2Ant Init HW Config!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], 2Ant Init HW Config!!\n");
/* backup rf 0x1e value */
coex_dm->bt_rf0x1e_backup =
@@ -3328,8 +3317,8 @@ ex_halbtc8821a2ant_init_coex_dm(
struct btc_coexist *btcoexist
)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Coex Mechanism Init!!\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Coex Mechanism Init!!\n");
halbtc8821a2ant_init_coex_dm(btcoexist);
}
@@ -3574,13 +3563,13 @@ ex_halbtc8821a2ant_display_coex_info(
void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_IPS_ENTER == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS ENTER notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS ENTER notify\n");
coex_sta->under_ips = true;
halbtc8821a2ant_coex_all_off(btcoexist);
} else if (BTC_IPS_LEAVE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], IPS LEAVE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], IPS LEAVE notify\n");
coex_sta->under_ips = false;
/*halbtc8821a2ant_init_coex_dm(btcoexist);*/
}
@@ -3589,12 +3578,12 @@ void ex_halbtc8821a2ant_ips_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_LPS_ENABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS ENABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS ENABLE notify\n");
coex_sta->under_lps = true;
} else if (BTC_LPS_DISABLE == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], LPS DISABLE notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], LPS DISABLE notify\n");
coex_sta->under_lps = false;
}
}
@@ -3602,22 +3591,22 @@ void ex_halbtc8821a2ant_lps_notify(struct btc_coexist *btcoexist, u8 type)
void ex_halbtc8821a2ant_scan_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_SCAN_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN START notify\n");
} else if (BTC_SCAN_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], SCAN FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], SCAN FINISH notify\n");
}
}
void ex_halbtc8821a2ant_connect_notify(struct btc_coexist *btcoexist, u8 type)
{
if (BTC_ASSOCIATE_START == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT START notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT START notify\n");
} else if (BTC_ASSOCIATE_FINISH == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], CONNECT FINISH notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], CONNECT FINISH notify\n");
}
}
@@ -3629,11 +3618,11 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
u8 wifi_central_chnl;
if (BTC_MEDIA_CONNECT == type) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA connect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA connect notify\n");
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], MEDIA disconnect notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], MEDIA disconnect notify\n");
}
	/* only for 2.4 GHz do we need to inform BT of the channel mask */
@@ -3654,9 +3643,11 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
coex_dm->wifi_chnl_info[1] = h2c_parameter[1];
coex_dm->wifi_chnl_info[2] = h2c_parameter[2];
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC,
- "[BTCoex], FW write 0x66 = 0x%x\n",
- h2c_parameter[0]<<16|h2c_parameter[1]<<8|h2c_parameter[2]);
+ btc_alg_dbg(ALGO_TRACE_FW_EXEC,
+ "[BTCoex], FW write 0x66 = 0x%x\n",
+ h2c_parameter[0] << 16 |
+ h2c_parameter[1] << 8 |
+ h2c_parameter[2]);
btcoexist->btc_fill_h2c(btcoexist, 0x66, 3, h2c_parameter);
}
@@ -3664,8 +3655,8 @@ void ex_halbtc8821a2ant_media_status_notify(struct btc_coexist *btcoexist,
void ex_halbtc8821a2ant_special_packet_notify(struct btc_coexist *btcoexist,
u8 type) {
if (type == BTC_PACKET_DHCP) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], DHCP Packet notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], DHCP Packet notify\n");
}
}
@@ -3685,19 +3676,19 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
rsp_source = BT_INFO_SRC_8821A_2ANT_WIFI_FW;
coex_sta->bt_info_c2h_cnt[rsp_source]++;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Bt info[%d], length = %d, hex data = [",
- rsp_source, length);
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Bt info[%d], length = %d, hex data = [",
+ rsp_source, length);
for (i = 0; i < length; i++) {
coex_sta->bt_info_c2h[rsp_source][i] = tmp_buf[i];
if (i == 1)
bt_info = tmp_buf[i];
if (i == length-1) {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x]\n", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x]\n", tmp_buf[i]);
} else {
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "0x%02x, ", tmp_buf[i]);
+ btc_iface_dbg(INTF_NOTIFY,
+ "0x%02x, ", tmp_buf[i]);
}
}
@@ -3823,8 +3814,8 @@ void ex_halbtc8821a2ant_bt_info_notify(struct btc_coexist *btcoexist,
void ex_halbtc8821a2ant_halt_notify(struct btc_coexist *btcoexist)
{
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_NOTIFY,
- "[BTCoex], Halt notify\n");
+ btc_iface_dbg(INTF_NOTIFY,
+ "[BTCoex], Halt notify\n");
halbtc8821a2ant_ignore_wlan_act(btcoexist, FORCE_EXEC, true);
ex_halbtc8821a2ant_media_status_notify(btcoexist, BTC_MEDIA_DISCONNECT);
@@ -3837,31 +3828,31 @@ void ex_halbtc8821a2ant_periodical(struct btc_coexist *btcoexist)
struct btc_board_info *board_info = &btcoexist->board_info;
struct btc_stack_info *stack_info = &btcoexist->stack_info;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "[BTCoex], ==========================Periodical===========================\n");
+ btc_alg_dbg(ALGO_TRACE,
+ "[BTCoex], ==========================Periodical===========================\n");
if (dis_ver_info_cnt <= 5) {
dis_ver_info_cnt += 1;
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
- board_info->pg_ant_num,
- board_info->btdm_ant_num,
- board_info->btdm_ant_pos);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
- ((stack_info->profile_notified) ? "Yes" : "No"),
- stack_info->hci_version);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], Ant PG Num/ Ant Mech/ Ant Pos = %d/ %d/ %d\n",
+ board_info->pg_ant_num,
+ board_info->btdm_ant_num,
+ board_info->btdm_ant_pos);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], BT stack/ hci ext ver = %s / %d\n",
+ stack_info->profile_notified ? "Yes" : "No",
+ stack_info->hci_version);
btcoexist->btc_get(btcoexist, BTC_GET_U4_BT_PATCH_VER,
&bt_patch_ver);
btcoexist->btc_get(btcoexist, BTC_GET_U4_WIFI_FW_VER, &fw_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
- glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
- fw_ver, bt_patch_ver, bt_patch_ver);
- BTC_PRINT(BTC_MSG_INTERFACE, INTF_INIT,
- "[BTCoex], ****************************************************************\n");
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], CoexVer/ FwVer/ PatchVer = %d_%x/ 0x%x/ 0x%x(%d)\n",
+ glcoex_ver_date_8821a_2ant, glcoex_ver_8821a_2ant,
+ fw_ver, bt_patch_ver, bt_patch_ver);
+ btc_iface_dbg(INTF_INIT,
+ "[BTCoex], ****************************************************************\n");
}
halbtc8821a2ant_query_bt_info(btcoexist);
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
index babd1490f..b660c214d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c
@@ -141,8 +141,8 @@ static u8 halbtc_get_wifi_central_chnl(struct btc_coexist *btcoexist)
if (rtlphy->current_channel != 0)
chnl = rtlphy->current_channel;
- BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE,
- "static halbtc_get_wifi_central_chnl:%d\n", chnl);
+ btc_alg_dbg(ALGO_TRACE,
+ "static halbtc_get_wifi_central_chnl:%d\n", chnl);
return chnl;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
index f41ca57dd..3cbe34c53 100644
--- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
+++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.h
@@ -116,12 +116,17 @@ extern u32 btc_dbg_type[];
#define WIFI_P2P_GO_CONNECTED BIT3
#define WIFI_P2P_GC_CONNECTED BIT4
-#define BTC_PRINT(dbgtype, dbgflag, printstr, ...) \
- do { \
- if (unlikely(btc_dbg_type[dbgtype] & dbgflag)) {\
- printk(printstr, ##__VA_ARGS__); \
- } \
- } while (0)
+#define btc_alg_dbg(dbgflag, fmt, ...) \
+do { \
+ if (unlikely(btc_dbg_type[BTC_MSG_ALGORITHM] & dbgflag)) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+} while (0)
+#define btc_iface_dbg(dbgflag, fmt, ...) \
+do { \
+ if (unlikely(btc_dbg_type[BTC_MSG_INTERFACE] & dbgflag)) \
+ printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
+} while (0)
+
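
For reference, the new helpers hard-code the btc_dbg_type index that BTC_PRINT took as its first argument, which is why every call site above shrinks by one parameter, and they keep the standard do { } while (0) wrapper so each expansion is a single statement. A usage sketch, assuming the driver's halbtcoutsrc.h definitions are in scope:

/* Call-site shape:
 *
 *	old: BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, "...\n");
 *	new: btc_alg_dbg(ALGO_TRACE, "...\n");
 *
 * The do { } while (0) body keeps unbraced conditionals legal: */
static void example(bool wifi_busy)
{
	if (wifi_busy)
		btc_alg_dbg(ALGO_TRACE, "[BTCoex], Wifi Busy\n");
	else
		btc_alg_dbg(ALGO_TRACE, "[BTCoex], Wifi idle\n");
	/* with a bare if/printk macro body, the 'else' above would bind
	 * to the macro's inner 'if' and the build would break */
}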
#define BTC_RSSI_HIGH(_rssi_) \
((_rssi_ == BTC_RSSI_STATE_HIGH || \
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index e5037d13b..d12586d4f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -359,30 +359,28 @@ static bool rtl_pci_check_buddy_priv(struct ieee80211_hw *hw,
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
bool find_buddy_priv = false;
- struct rtl_priv *tpriv = NULL;
+ struct rtl_priv *tpriv;
struct rtl_pci_priv *tpcipriv = NULL;
if (!list_empty(&rtlpriv->glb_var->glb_priv_list)) {
list_for_each_entry(tpriv, &rtlpriv->glb_var->glb_priv_list,
list) {
- if (tpriv) {
- tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "pcipriv->ndis_adapter.funcnumber %x\n",
- pcipriv->ndis_adapter.funcnumber);
- RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "tpcipriv->ndis_adapter.funcnumber %x\n",
- tpcipriv->ndis_adapter.funcnumber);
-
- if ((pcipriv->ndis_adapter.busnumber ==
- tpcipriv->ndis_adapter.busnumber) &&
- (pcipriv->ndis_adapter.devnumber ==
- tpcipriv->ndis_adapter.devnumber) &&
- (pcipriv->ndis_adapter.funcnumber !=
- tpcipriv->ndis_adapter.funcnumber)) {
- find_buddy_priv = true;
- break;
- }
+ tpcipriv = (struct rtl_pci_priv *)tpriv->priv;
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "pcipriv->ndis_adapter.funcnumber %x\n",
+ pcipriv->ndis_adapter.funcnumber);
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
+ "tpcipriv->ndis_adapter.funcnumber %x\n",
+ tpcipriv->ndis_adapter.funcnumber);
+
+ if ((pcipriv->ndis_adapter.busnumber ==
+ tpcipriv->ndis_adapter.busnumber) &&
+ (pcipriv->ndis_adapter.devnumber ==
+ tpcipriv->ndis_adapter.devnumber) &&
+ (pcipriv->ndis_adapter.funcnumber !=
+ tpcipriv->ndis_adapter.funcnumber)) {
+ find_buddy_priv = true;
+ break;
}
}
}
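
The dropped NULL initialization and if (tpriv) test were dead weight: list_for_each_entry() derives its cursor from container_of() on live list nodes, so the cursor is never NULL inside the loop body. A minimal illustration with a hypothetical item type:

#include <linux/list.h>

struct item {
	int val;
	struct list_head node;
};

static int sum_items(struct list_head *head)
{
	struct item *it;	/* no '= NULL' needed: the loop assigns it */
	int sum = 0;

	list_for_each_entry(it, head, node) {
		/* 'it' comes from container_of() on a live node and is
		 * never NULL here, so an if (it) guard is dead code */
		sum += it->val;
	}
	return sum;
}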
@@ -1213,7 +1211,8 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
/*Tx/Rx related var */
_rtl_pci_init_trx_var(hw);
- /*IBSS*/ mac->beacon_interval = 100;
+ /*IBSS*/
+ mac->beacon_interval = 100;
/*AMPDU*/
mac->min_space_cfg = 0;
@@ -2457,7 +2456,7 @@ int rtl_pci_resume(struct device *dev)
EXPORT_SYMBOL(rtl_pci_resume);
#endif /* CONFIG_PM_SLEEP */
-struct rtl_intf_ops rtl_pci_ops = {
+const struct rtl_intf_ops rtl_pci_ops = {
.read_efuse_byte = read_efuse_byte,
.adapter_start = rtl_pci_start,
.adapter_stop = rtl_pci_stop,
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.h b/drivers/net/wireless/realtek/rtlwifi/pci.h
index 5da670394..b951ebac1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.h
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.h
@@ -286,7 +286,7 @@ struct rtl_pci_priv {
int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
-extern struct rtl_intf_ops rtl_pci_ops;
+extern const struct rtl_intf_ops rtl_pci_ops;
int rtl_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index b69321d45..93579cac0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -443,14 +443,10 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
- /* Idle for a while if we connect to AP a while ago. */
- if (mac->cnt_after_linked >= 2) {
- if (ppsc->dot11_psmode == EACTIVE) {
- RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
- "Enter 802.11 power save mode...\n");
-
- rtl_lps_set_psmode(hw, EAUTOPS);
- }
+ if (ppsc->dot11_psmode == EACTIVE) {
+ RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Enter 802.11 power save mode...\n");
+ rtl_lps_set_psmode(hw, EAUTOPS);
}
spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
index 5be34118e..3524441fd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
+++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
@@ -154,13 +154,13 @@ static bool _rtl_is_radar_freq(u16 center_freq)
static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
enum nl80211_reg_initiator initiator)
{
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
const struct ieee80211_reg_rule *reg_rule;
struct ieee80211_channel *ch;
unsigned int i;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!wiphy->bands[band])
continue;
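
This file's hunks, and the rsi, cw1200, wl1251 and wl12xx ones further down, are a mechanical rename from mac80211's enum ieee80211_band to cfg80211's enum nl80211_band; the two enums were kept value-compatible, so loop bounds and array sizes do not change. The new spelling of such a band loop, as a sketch:

#include <net/cfg80211.h>

/* Sketch: walk every band a wiphy registered, nl80211 spelling. */
static void for_each_supported_band(struct wiphy *wiphy)
{
	enum nl80211_band band;

	for (band = 0; band < NUM_NL80211_BANDS; band++) {
		struct ieee80211_supported_band *sband = wiphy->bands[band];

		if (!sband)		/* device does not serve this band */
			continue;
		/* ... use sband->channels / sband->n_channels ... */
	}
}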
@@ -210,9 +210,9 @@ static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
struct ieee80211_channel *ch;
const struct ieee80211_reg_rule *reg_rule;
- if (!wiphy->bands[IEEE80211_BAND_2GHZ])
+ if (!wiphy->bands[NL80211_BAND_2GHZ])
return;
- sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ sband = wiphy->bands[NL80211_BAND_2GHZ];
/*
*If no country IE has been received always enable active scan
@@ -262,10 +262,10 @@ static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
struct ieee80211_channel *ch;
unsigned int i;
- if (!wiphy->bands[IEEE80211_BAND_5GHZ])
+ if (!wiphy->bands[NL80211_BAND_5GHZ])
return;
- sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
for (i = 0; i < sband->n_channels; i++) {
ch = &sband->channels[i];
@@ -301,12 +301,12 @@ static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
static void _rtl_dump_channel_map(struct wiphy *wiphy)
{
- enum ieee80211_band band;
+ enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_channel *ch;
unsigned int i;
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
if (!wiphy->bands[band])
continue;
sband = wiphy->bands[band];
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
index ce4da9d79..db9a7829d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/dm.c
@@ -1137,7 +1137,7 @@ void rtl88e_dm_check_txpower_tracking(struct ieee80211_hw *hw)
} else {
RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
"Schedule TxPowerTracking !!\n");
- dm_txpower_track_cb_therm(hw);
+ dm_txpower_track_cb_therm(hw);
rtlpriv->dm.tm_trigger = 0;
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
index a2bb02c7b..416a9ba63 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/phy.c
@@ -1903,8 +1903,7 @@ static void _rtl88e_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
} else {
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
-RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
-
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
}
static void _rtl88e_phy_set_rfpath_switch(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
index 03cbe4cf1..316be5ff6 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/dm_common.c
@@ -240,7 +240,7 @@ static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
- ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
+ ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
index 24eff8ea4..35e6bf7e2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c
@@ -368,7 +368,7 @@ bool rtl92ee_rx_query_desc(struct ieee80211_hw *hw,
status->decrypted = !GET_RX_DESC_SWDEC(pdesc);
status->rate = (u8)GET_RX_DESC_RXMCS(pdesc);
status->isampdu = (bool)(GET_RX_DESC_PAGGR(pdesc) == 1);
- status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
+ status->timestamp_low = GET_RX_DESC_TSFL(pdesc);
status->is_cck = RTL92EE_RX_HAL_IS_CCK_RATE(status->rate);
status->macid = GET_RX_DESC_MACID(pdesc);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
index 4b4612fe2..881821f4e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/phy.c
@@ -645,7 +645,7 @@ bool rtl92s_phy_set_rf_power_state(struct ieee80211_hw *hw,
rtlpriv->psc.state_inap);
ppsc->last_sleep_jiffies = jiffies;
_rtl92se_phy_set_rf_sleep(hw);
- break;
+ break;
default:
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"switch case not processed\n");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
index 78a81c1e3..9475aa2a8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/rf.c
@@ -208,8 +208,7 @@ static void _rtl92s_get_txpower_writeval_byregulatory(struct ieee80211_hw *hw,
"Realtek regulatory, 40MHz, writeval = 0x%x\n",
writeval);
} else {
- if (rtlphy->pwrgroup_cnt == 1)
- chnlgroup = 0;
+ chnlgroup = 0;
if (rtlphy->pwrgroup_cnt >= 3) {
if (chnl <= 3)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
index 00a0531cc..44de695dc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/hal_btc.c
@@ -134,9 +134,9 @@ static bool rtl8723e_dm_bt_need_to_dec_bt_pwr(struct ieee80211_hw *hw)
if (mgnt_link_status_query(hw) == RT_MEDIA_CONNECT) {
RT_TRACE(rtlpriv, COMP_BT_COEXIST, DBG_DMESG,
"Need to decrease bt power\n");
- rtlpriv->btcoexist.cstate |=
- BT_COEX_STATE_DEC_BT_POWER;
- return true;
+ rtlpriv->btcoexist.cstate |=
+ BT_COEX_STATE_DEC_BT_POWER;
+ return true;
}
rtlpriv->btcoexist.cstate &= ~BT_COEX_STATE_DEC_BT_POWER;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index b7b73cbe3..445f681d0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -1723,8 +1723,8 @@ static u8 _rtl8723be_phy_path_a_rx_iqk(struct ieee80211_hw *hw)
/* Allen 20131125 */
tmp = (reg_eac & 0x03FF0000) >> 16;
- if ((tmp & 0x200) > 0)
- tmp = 0x400 - tmp;
+ if ((tmp & 0x200) > 0)
+ tmp = 0x400 - tmp;
/* if Tx is OK, check whether Rx is OK */
if (!(reg_eac & BIT(27)) &&
(((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
@@ -2301,8 +2301,7 @@ static void _rtl8723be_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
} else {
rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
}
-RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
-
+ RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "\n");
}
static void _rtl8723be_phy_set_rfpath_switch(struct ieee80211_hw *hw,
@@ -2606,8 +2605,7 @@ static bool _rtl8723be_phy_set_rf_power_state(struct ieee80211_hw *hw,
"IPS Set eRf nic enable\n");
rtstatus = rtl_ps_enable_nic(hw);
} while (!rtstatus && (initializecount < 10));
- RT_CLEAR_PS_LEVEL(ppsc,
- RT_RF_OFF_LEVL_HALT_NIC);
+ RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
} else {
RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
"Set ERFON sleeped:%d ms\n",
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
index 5ed4492d3..97f5a0377 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/rf.c
@@ -303,8 +303,8 @@ static void _rtl8723be_get_txpower_writeval_by_regulatory(
[chnlgroup][index + (rf ? 8 : 0)] &
(0x7f << (i * 8))) >> (i * 8));
- if (pwr_diff_limit[i] > pwr_diff)
- pwr_diff_limit[i] = pwr_diff;
+ if (pwr_diff_limit[i] > pwr_diff)
+ pwr_diff_limit[i] = pwr_diff;
}
customer_limit = (pwr_diff_limit[3] << 24) |
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
index 6a8245c4e..17a681788 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/dm.c
@@ -1957,9 +1957,9 @@ void rtl8812ae_dm_txpower_tracking_callback_thermalmeter(
rtldm->swing_idx_ofdm_base[p] =
rtldm->swing_idx_ofdm[p];
- RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
- rtldm->thermalvalue, thermal_value);
+ RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pDM_Odm->RFCalibrateInfo.ThermalValue =%d ThermalValue= %d\n",
+ rtldm->thermalvalue, thermal_value);
/*Record last Power Tracking Thermal Value*/
rtldm->thermalvalue = thermal_value;
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
index fe900badd..71e4dd996 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
@@ -2315,14 +2315,14 @@ static void _rtl8821ae_clear_pci_pme_status(struct ieee80211_hw *hw)
pci_read_config_byte(rtlpci->pdev, 0x34, &cap_pointer);
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "PCI configration 0x34 = 0x%2x\n", cap_pointer);
+ "PCI configuration 0x34 = 0x%2x\n", cap_pointer);
do {
pci_read_config_word(rtlpci->pdev, cap_pointer, &cap_hdr);
cap_id = cap_hdr & 0xFF;
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
- "in pci configration, cap_pointer%x = %x\n",
+ "in pci configuration, cap_pointer%x = %x\n",
cap_pointer, cap_id);
if (cap_id == 0x01) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 74165b3eb..0c3b9ce86 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -418,9 +418,9 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
out = 0x16A; /* -3 dB */
}
} else {
- u32 swing = 0, swing_a = 0, swing_b = 0;
+ u32 swing = 0, swing_a = 0, swing_b = 0;
- if (band == BAND_ON_2_4G) {
+ if (band == BAND_ON_2_4G) {
if (reg_swing_2g == auto_temp) {
efuse_shadow_read(hw, 1, 0xC6, (u32 *)&swing);
swing = (swing == 0xFF) ? 0x00 : swing;
@@ -514,7 +514,7 @@ u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
RT_TRACE(rtlpriv, COMP_SCAN, DBG_LOUD,
"<=== PHY_GetTxBBSwing_8812A, out = 0x%X\n", out);
- return out;
+ return out;
}
void rtl8821ae_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
@@ -959,7 +959,7 @@ static void _rtl8821ae_phy_store_txpower_by_rate_base(struct ieee80211_hw *hw)
static void _phy_convert_txpower_dbm_to_relative_value(u32 *data, u8 start,
u8 end, u8 base_val)
{
- char i = 0;
+ int i;
u8 temp_value = 0;
u32 temp_data = 0;
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index aac1ed3f7..41617b7b0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1049,7 +1049,7 @@ static void rtl_fill_h2c_cmd_work_callback(struct work_struct *work)
rtlpriv->cfg->ops->fill_h2c_cmd(hw, H2C_RA_MASK, 5, rtlpriv->rate_mask);
}
-static struct rtl_intf_ops rtl_usb_ops = {
+static const struct rtl_intf_ops rtl_usb_ops = {
.adapter_start = rtl_usb_start,
.adapter_stop = rtl_usb_stop,
.adapter_tx = rtl_usb_tx,
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 93bd7fcd2..4e0ab4d42 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -1359,7 +1359,7 @@ struct rtl_mac {
u32 tx_ss_num;
u32 rx_ss_num;
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band bands[NUM_NL80211_BANDS];
struct ieee80211_hw *hw;
struct ieee80211_vif *vif;
enum nl80211_iftype opmode;
@@ -2593,7 +2593,7 @@ struct rtl_priv {
 *intf_ops : for different interfaces, usb/pcie
*/
struct rtl_hal_cfg *cfg;
- struct rtl_intf_ops *intf_ops;
+ const struct rtl_intf_ops *intf_ops;
	/* this var will be set by set_bit,
	   and is used to indicate the status of
@@ -2870,7 +2870,7 @@ value to host byte ordering.*/
(ppsc->cur_ps_level |= _ps_flg)
#define container_of_dwork_rtl(x, y, z) \
- container_of(container_of(x, struct delayed_work, work), y, z)
+ container_of(to_delayed_work(x), y, z)
#define FILL_OCTET_STRING(_os, _octet, _len) \
(_os).octet = (u8 *)(_octet); \
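
Both this macro and the wl1251 hunk below replace an open-coded container_of(x, struct delayed_work, work) with the to_delayed_work() helper from <linux/workqueue.h>, which performs exactly that conversion. A sketch of the usual two-step recovery in a work callback, with a hypothetical device struct:

#include <linux/workqueue.h>

struct my_dev {				/* hypothetical driver context */
	struct delayed_work scan_work;
};

static void my_scan_work(struct work_struct *work)
{
	/* to_delayed_work() is container_of(work, struct delayed_work,
	 * work); a second container_of() recovers the device. */
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_dev *dev = container_of(dwork, struct my_dev, scan_work);

	(void)dev;			/* ... periodic work ... */
}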
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index a13d1f2b5..569918c48 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -1291,7 +1291,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
return 0;
dsconfig = 1000 *
- ieee80211_channel_to_frequency(channel, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
len = sizeof(config);
ret = rndis_query_oid(usbdev,
@@ -3476,7 +3476,7 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
priv->band.n_channels = ARRAY_SIZE(rndis_channels);
priv->band.bitrates = priv->rates;
priv->band.n_bitrates = ARRAY_SIZE(rndis_rates);
- wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
memcpy(priv->cipher_suites, rndis_cipher_suites,
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 4df992de7..dbb23899d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -20,84 +20,84 @@
#include "rsi_common.h"
static const struct ieee80211_channel rsi_2ghz_channels[] = {
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2412,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2412,
.hw_value = 1 }, /* Channel 1 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2417,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2417,
.hw_value = 2 }, /* Channel 2 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2422,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2422,
.hw_value = 3 }, /* Channel 3 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2427,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2427,
.hw_value = 4 }, /* Channel 4 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2432,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2432,
.hw_value = 5 }, /* Channel 5 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2437,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2437,
.hw_value = 6 }, /* Channel 6 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2442,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2442,
.hw_value = 7 }, /* Channel 7 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2447,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2447,
.hw_value = 8 }, /* Channel 8 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2452,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2452,
.hw_value = 9 }, /* Channel 9 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2457,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2457,
.hw_value = 10 }, /* Channel 10 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2462,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2462,
.hw_value = 11 }, /* Channel 11 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2467,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2467,
.hw_value = 12 }, /* Channel 12 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2472,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2472,
.hw_value = 13 }, /* Channel 13 */
- { .band = IEEE80211_BAND_2GHZ, .center_freq = 2484,
+ { .band = NL80211_BAND_2GHZ, .center_freq = 2484,
.hw_value = 14 }, /* Channel 14 */
};
static const struct ieee80211_channel rsi_5ghz_channels[] = {
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5180,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5180,
.hw_value = 36, }, /* Channel 36 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5200,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5200,
.hw_value = 40, }, /* Channel 40 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5220,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5220,
.hw_value = 44, }, /* Channel 44 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5240,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5240,
.hw_value = 48, }, /* Channel 48 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5260,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5260,
.hw_value = 52, }, /* Channel 52 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5280,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5280,
.hw_value = 56, }, /* Channel 56 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5300,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5300,
.hw_value = 60, }, /* Channel 60 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5320,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5320,
.hw_value = 64, }, /* Channel 64 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5500,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5500,
.hw_value = 100, }, /* Channel 100 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5520,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5520,
.hw_value = 104, }, /* Channel 104 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5540,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5540,
.hw_value = 108, }, /* Channel 108 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5560,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5560,
.hw_value = 112, }, /* Channel 112 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5580,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5580,
.hw_value = 116, }, /* Channel 116 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5600,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5600,
.hw_value = 120, }, /* Channel 120 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5620,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5620,
.hw_value = 124, }, /* Channel 124 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5640,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5640,
.hw_value = 128, }, /* Channel 128 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5660,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5660,
.hw_value = 132, }, /* Channel 132 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5680,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5680,
.hw_value = 136, }, /* Channel 136 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5700,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5700,
.hw_value = 140, }, /* Channel 140 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5745,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5745,
.hw_value = 149, }, /* Channel 149 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5765,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5765,
.hw_value = 153, }, /* Channel 153 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5785,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5785,
.hw_value = 157, }, /* Channel 157 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5805,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5805,
.hw_value = 161, }, /* Channel 161 */
- { .band = IEEE80211_BAND_5GHZ, .center_freq = 5825,
+ { .band = NL80211_BAND_5GHZ, .center_freq = 5825,
.hw_value = 165, }, /* Channel 165 */
};
@@ -150,12 +150,12 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
struct ieee80211_supported_band *sbands = &adapter->sbands[band];
void *channels = NULL;
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
memcpy(channels,
rsi_2ghz_channels,
sizeof(rsi_2ghz_channels));
- sbands->band = IEEE80211_BAND_2GHZ;
+ sbands->band = NL80211_BAND_2GHZ;
sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
sbands->bitrates = rsi_rates;
sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
@@ -164,7 +164,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
memcpy(channels,
rsi_5ghz_channels,
sizeof(rsi_5ghz_channels));
- sbands->band = IEEE80211_BAND_5GHZ;
+ sbands->band = NL80211_BAND_5GHZ;
sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
sbands->bitrates = &rsi_rates[4];
sbands->n_bitrates = ARRAY_SIZE(rsi_rates) - 4;
@@ -775,7 +775,7 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
- enum ieee80211_band band = hw->conf.chandef.chan->band;
+ enum nl80211_band band = hw->conf.chandef.chan->band;
mutex_lock(&common->mutex);
common->fixedrate_mask[band] = 0;
@@ -999,8 +999,8 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
mutex_lock(&common->mutex);
/* Resetting all the fields to default values */
- common->bitrate_mask[IEEE80211_BAND_2GHZ] = 0;
- common->bitrate_mask[IEEE80211_BAND_5GHZ] = 0;
+ common->bitrate_mask[NL80211_BAND_2GHZ] = 0;
+ common->bitrate_mask[NL80211_BAND_5GHZ] = 0;
common->min_rate = 0xffff;
common->vif_info[0].is_ht = false;
common->vif_info[0].sgi = false;
@@ -1070,8 +1070,8 @@ int rsi_mac80211_attach(struct rsi_common *common)
hw->max_rate_tries = MAX_RETRIES;
hw->max_tx_aggregation_subframes = 6;
- rsi_register_rates_channels(adapter, IEEE80211_BAND_2GHZ);
- rsi_register_rates_channels(adapter, IEEE80211_BAND_5GHZ);
+ rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
+ rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
hw->rate_control_algorithm = "AARF";
SET_IEEE80211_PERM_ADDR(hw, common->mac_addr);
@@ -1087,10 +1087,10 @@ int rsi_mac80211_attach(struct rsi_common *common)
wiphy->available_antennas_rx = 1;
wiphy->available_antennas_tx = 1;
- wiphy->bands[IEEE80211_BAND_2GHZ] =
- &adapter->sbands[IEEE80211_BAND_2GHZ];
- wiphy->bands[IEEE80211_BAND_5GHZ] =
- &adapter->sbands[IEEE80211_BAND_5GHZ];
+ wiphy->bands[NL80211_BAND_2GHZ] =
+ &adapter->sbands[NL80211_BAND_2GHZ];
+ wiphy->bands[NL80211_BAND_5GHZ] =
+ &adapter->sbands[NL80211_BAND_5GHZ];
status = ieee80211_register_hw(hw);
if (status)
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index e43b59d5b..40658b62d 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -210,7 +210,7 @@ static u16 mcs[] = {13, 26, 39, 52, 78, 104, 117, 130};
*/
static void rsi_set_default_parameters(struct rsi_common *common)
{
- common->band = IEEE80211_BAND_2GHZ;
+ common->band = NL80211_BAND_2GHZ;
common->channel_width = BW_20MHZ;
common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
common->channel = 1;
@@ -655,7 +655,7 @@ int rsi_set_vap_capabilities(struct rsi_common *common, enum opmode mode)
vap_caps->rts_threshold = cpu_to_le16(common->rts_threshold);
vap_caps->default_mgmt_rate = cpu_to_le32(RSI_RATE_6);
- if (common->band == IEEE80211_BAND_5GHZ) {
+ if (common->band == NL80211_BAND_5GHZ) {
vap_caps->default_ctrl_rate = cpu_to_le32(RSI_RATE_6);
if (conf_is_ht40(&common->priv->hw->conf)) {
vap_caps->default_ctrl_rate |=
@@ -872,7 +872,7 @@ int rsi_band_check(struct rsi_common *common)
else
common->channel_width = BW_40MHZ;
- if (common->band == IEEE80211_BAND_2GHZ) {
+ if (common->band == NL80211_BAND_2GHZ) {
if (common->channel_width)
common->endpoint = EP_2GHZ_40MHZ;
else
@@ -1046,7 +1046,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common)
if (common->channel_width == BW_40MHZ)
auto_rate->desc_word[7] |= cpu_to_le16(1);
- if (band == IEEE80211_BAND_2GHZ) {
+ if (band == NL80211_BAND_2GHZ) {
min_rate = RSI_RATE_1;
rate_table_offset = 0;
} else {
diff --git a/drivers/net/wireless/rsi/rsi_91x_pkt.c b/drivers/net/wireless/rsi/rsi_91x_pkt.c
index 702593f19..02920c93e 100644
--- a/drivers/net/wireless/rsi/rsi_91x_pkt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_pkt.c
@@ -27,22 +27,24 @@
int rsi_send_data_pkt(struct rsi_common *common, struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
- struct ieee80211_hdr *tmp_hdr = NULL;
+ struct ieee80211_hdr *tmp_hdr;
struct ieee80211_tx_info *info;
struct skb_info *tx_params;
- struct ieee80211_bss_conf *bss = NULL;
- int status = -EINVAL;
+ struct ieee80211_bss_conf *bss;
+ int status;
u8 ieee80211_size = MIN_802_11_HDR_LEN;
- u8 extnd_size = 0;
+ u8 extnd_size;
__le16 *frame_desc;
- u16 seq_num = 0;
+ u16 seq_num;
info = IEEE80211_SKB_CB(skb);
bss = &info->control.vif->bss_conf;
tx_params = (struct skb_info *)info->driver_data;
- if (!bss->assoc)
+ if (!bss->assoc) {
+ status = -EINVAL;
goto err;
+ }
tmp_hdr = (struct ieee80211_hdr *)&skb->data[0];
seq_num = (le16_to_cpu(tmp_hdr->seq_ctrl) >> 4);
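
The rsi cleanups here drop defensive initializers (NULL, 0, -EINVAL) from variables that are unconditionally assigned before use, and set the error code at the branch that actually fails; an uninitialized status also lets the compiler flag any future path that forgets to assign one. A condensed userspace sketch of the shape, all names hypothetical:

#include <errno.h>
#include <stdbool.h>

struct dev { bool assoc; };
struct pkt { int len; };

/* Stand-in for the real hardware transmit path. */
static int hw_xmit(struct dev *d, struct pkt *p) { (void)d; (void)p; return 0; }

static int send_pkt(struct dev *d, struct pkt *p)
{
	int status;			/* no blanket '= -EINVAL' up front */

	if (!d->assoc) {
		status = -EINVAL;	/* error set where the check fails */
		goto err;
	}
	status = hw_xmit(d, p);		/* outcome from the real operation */
err:
	return status;
}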
@@ -123,15 +125,15 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
struct sk_buff *skb)
{
struct rsi_hw *adapter = common->priv;
- struct ieee80211_hdr *wh = NULL;
+ struct ieee80211_hdr *wh;
struct ieee80211_tx_info *info;
- struct ieee80211_bss_conf *bss = NULL;
+ struct ieee80211_bss_conf *bss;
struct ieee80211_hw *hw = adapter->hw;
struct ieee80211_conf *conf = &hw->conf;
struct skb_info *tx_params;
int status = -E2BIG;
- __le16 *msg = NULL;
- u8 extnd_size = 0;
+ __le16 *msg;
+ u8 extnd_size;
u8 vap_id = 0;
info = IEEE80211_SKB_CB(skb);
@@ -182,7 +184,7 @@ int rsi_send_mgmt_pkt(struct rsi_common *common,
if (wh->addr1[0] & BIT(0))
msg[3] |= cpu_to_le16(RSI_BROADCAST_PKT);
- if (common->band == IEEE80211_BAND_2GHZ)
+ if (common->band == NL80211_BAND_2GHZ)
msg[4] = cpu_to_le16(RSI_11B_MODE);
else
msg[4] = cpu_to_le16((RSI_RATE_6 & 0x0f) | RSI_11G_MODE);
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 5baed945f..dcd095787 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -211,7 +211,7 @@ struct rsi_hw {
struct ieee80211_hw *hw;
struct ieee80211_vif *vifs[RSI_MAX_VIFS];
struct ieee80211_tx_queue_params edca_params[NUM_EDCA_QUEUES];
- struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
+ struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
struct device *device;
u8 sc_nvifs;
diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
index 0e51e27d2..dc478cedb 100644
--- a/drivers/net/wireless/st/cw1200/main.c
+++ b/drivers/net/wireless/st/cw1200/main.c
@@ -102,7 +102,7 @@ static struct ieee80211_rate cw1200_mcs_rates[] = {
#define CHAN2G(_channel, _freq, _flags) { \
- .band = IEEE80211_BAND_2GHZ, \
+ .band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -111,7 +111,7 @@ static struct ieee80211_rate cw1200_mcs_rates[] = {
}
#define CHAN5G(_channel, _flags) { \
- .band = IEEE80211_BAND_5GHZ, \
+ .band = NL80211_BAND_5GHZ, \
.center_freq = 5000 + (5 * (_channel)), \
.hw_value = (_channel), \
.flags = (_flags), \
@@ -311,12 +311,12 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
hw->sta_data_size = sizeof(struct cw1200_sta_priv);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &cw1200_band_2ghz;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &cw1200_band_2ghz;
if (have_5ghz)
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &cw1200_band_5ghz;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = &cw1200_band_5ghz;
/* Channel params have to be cleared before registering wiphy again */
- for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband = hw->wiphy->bands[band];
if (!sband)
continue;
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index bff81b8d4..983788156 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -402,7 +402,7 @@ void cw1200_probe_work(struct work_struct *work)
}
wsm = (struct wsm_tx *)frame.skb->data;
scan.max_tx_rate = wsm->max_tx_rate;
- scan.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ scan.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
if (priv->join_status == CW1200_JOIN_STATUS_STA ||
priv->join_status == CW1200_JOIN_STATUS_IBSS) {
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index eda75acf6..4c7fd2618 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -1278,7 +1278,7 @@ static void cw1200_do_join(struct cw1200_common *priv)
join.dtim_period = priv->join_dtim_period;
join.channel_number = priv->channel->hw_value;
- join.band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ join.band = (priv->channel->band == NL80211_BAND_5GHZ) ?
WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
memcpy(join.bssid, bssid, sizeof(join.bssid));
@@ -1462,7 +1462,7 @@ int cw1200_enable_listening(struct cw1200_common *priv)
};
if (priv->channel) {
- start.band = priv->channel->band == IEEE80211_BAND_5GHZ ?
+ start.band = priv->channel->band == NL80211_BAND_5GHZ ?
WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G;
start.channel_number = priv->channel->hw_value;
} else {
@@ -2315,7 +2315,7 @@ static int cw1200_start_ap(struct cw1200_common *priv)
struct wsm_start start = {
.mode = priv->vif->p2p ?
WSM_START_MODE_P2P_GO : WSM_START_MODE_AP,
- .band = (priv->channel->band == IEEE80211_BAND_5GHZ) ?
+ .band = (priv->channel->band == NL80211_BAND_5GHZ) ?
WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G,
.channel_number = priv->channel->hw_value,
.beacon_interval = conf->beacon_int,
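
Unchanged by this patch, but visible in all four cw1200 hunks: the nl80211 band is mapped to the firmware's WSM band with the same ternary each time. If one wanted to factor it out, a hypothetical helper (assuming the WSM_PHY_BAND_* constants from the driver's wsm header) might look like:

/* Hypothetical helper, not in the driver: map an nl80211 band to the
 * WSM PHY band the cw1200 firmware expects. */
static inline u8 cw1200_wsm_band(enum nl80211_band band)
{
	return band == NL80211_BAND_5GHZ ? WSM_PHY_BAND_5G
					 : WSM_PHY_BAND_2_4G;
}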
diff --git a/drivers/net/wireless/st/cw1200/txrx.c b/drivers/net/wireless/st/cw1200/txrx.c
index d28bd49cb..3d170287c 100644
--- a/drivers/net/wireless/st/cw1200/txrx.c
+++ b/drivers/net/wireless/st/cw1200/txrx.c
@@ -1079,7 +1079,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
hdr->band = ((arg->channel_number & 0xff00) ||
(arg->channel_number > 14)) ?
- IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
+ NL80211_BAND_5GHZ : NL80211_BAND_2GHZ;
hdr->freq = ieee80211_channel_to_frequency(
arg->channel_number,
hdr->band);
diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c
index 9e0ca3048..680d60eab 100644
--- a/drivers/net/wireless/st/cw1200/wsm.c
+++ b/drivers/net/wireless/st/cw1200/wsm.c
@@ -849,9 +849,9 @@ static int wsm_startup_indication(struct cw1200_common *priv,
/* Disable unsupported frequency bands */
if (!(priv->wsm_caps.fw_cap & 0x1))
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = NULL;
+ priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
if (!(priv->wsm_caps.fw_cap & 0x2))
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL;
+ priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
priv->firmware_ready = 1;
wake_up(&priv->wsm_startup_done);
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index a0536aed7..e74d60dad 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1482,7 +1482,7 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
- wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
+ wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wl1251_band_2ghz;
wl->hw->queues = 4;
diff --git a/drivers/net/wireless/ti/wl1251/ps.c b/drivers/net/wireless/ti/wl1251/ps.c
index b9e27b98b..fa01b0a0f 100644
--- a/drivers/net/wireless/ti/wl1251/ps.c
+++ b/drivers/net/wireless/ti/wl1251/ps.c
@@ -32,7 +32,7 @@ void wl1251_elp_work(struct work_struct *work)
struct delayed_work *dwork;
struct wl1251 *wl;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1251, elp_work);
wl1251_debug(DEBUG_PSM, "elp work");
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index cde0eaf99..a27d4c22b 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -53,7 +53,7 @@ static void wl1251_rx_status(struct wl1251 *wl,
memset(status, 0, sizeof(struct ieee80211_rx_status));
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
status->mactime = desc->timestamp;
/*
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index adfb3e9de..29f13f890 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -469,8 +469,8 @@ static const u8 wl12xx_rate_to_idx_5ghz[] = {
};
static const u8 *wl12xx_band_rate_to_idx[] = {
- [IEEE80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
- [IEEE80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
+ [NL80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
+ [NL80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
};
enum wl12xx_hw_rates {
@@ -553,8 +553,8 @@ static struct wlcore_partition_set wl12xx_ptable[PART_TABLE_LEN] = {
.size = 0x00000004
},
.mem3 = {
- .start = 0x00040404,
- .size = 0x00000000
+ .start = 0x00000000,
+ .size = 0x00040404
},
},
@@ -1827,8 +1827,8 @@ static int wl12xx_setup(struct wl1271 *wl)
wl->fw_status_priv_len = 0;
wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
wl->ofdm_only_ap = true;
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl12xx_ht_cap);
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ, &wl12xx_ht_cap);
wl12xx_conf_init(wl);
if (!fref_param) {
diff --git a/drivers/net/wireless/ti/wl12xx/scan.c b/drivers/net/wireless/ti/wl12xx/scan.c
index ebed13af9..8d475393f 100644
--- a/drivers/net/wireless/ti/wl12xx/scan.c
+++ b/drivers/net/wireless/ti/wl12xx/scan.c
@@ -27,7 +27,7 @@
static int wl1271_get_scan_channels(struct wl1271 *wl,
struct cfg80211_scan_request *req,
struct basic_scan_channel_params *channels,
- enum ieee80211_band band, bool passive)
+ enum nl80211_band band, bool passive)
{
struct conf_scan_settings *c = &wl->conf.scan;
int i, j;
@@ -92,7 +92,7 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
#define WL1271_NOTHING_TO_SCAN 1
static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum ieee80211_band band,
+ enum nl80211_band band,
bool passive, u32 basic_rate)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
@@ -144,12 +144,12 @@ static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
else
cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
- if (wl->scan.ssid_len && wl->scan.ssid) {
+ if (wl->scan.ssid_len) {
cmd->params.ssid_len = wl->scan.ssid_len;
memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
}
@@ -218,7 +218,7 @@ out:
void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
- enum ieee80211_band band;
+ enum nl80211_band band;
u32 rate, mask;
switch (wl->scan.state) {
@@ -226,7 +226,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
break;
case WL1271_SCAN_STATE_2GHZ_ACTIVE:
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
mask = wlvif->bitrate_masks[band];
if (wl->scan.req->no_cck) {
mask &= ~CONF_TX_CCK_RATES;
@@ -243,7 +243,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
break;
case WL1271_SCAN_STATE_2GHZ_PASSIVE:
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
mask = wlvif->bitrate_masks[band];
if (wl->scan.req->no_cck) {
mask &= ~CONF_TX_CCK_RATES;
@@ -263,7 +263,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
break;
case WL1271_SCAN_STATE_5GHZ_ACTIVE:
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
ret = wl1271_scan_send(wl, wlvif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
@@ -274,7 +274,7 @@ void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
break;
case WL1271_SCAN_STATE_5GHZ_PASSIVE:
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
ret = wl1271_scan_send(wl, wlvif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
@@ -378,7 +378,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
wl12xx_adjust_channels(cfg, cfg_channels);
if (!force_passive && cfg->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
wlvif->role_id, band,
req->ssids[0].ssid,
@@ -395,7 +395,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[1]) {
- u8 band = IEEE80211_BAND_5GHZ;
+ u8 band = NL80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
wlvif->role_id, band,
req->ssids[0].ssid,
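The dropped `wl->scan.ssid` test in the wl1271_scan_send() hunk above is dead code on the assumption, consistent with the memcpy() that follows it, that ssid is an embedded array rather than a pointer, so its address can never be NULL. A hedged standalone illustration:

#include <stdint.h>

struct scan_state {
	uint8_t ssid[32];	/* embedded array, never a NULL pointer */
	uint8_t ssid_len;
};

static int has_ssid(const struct scan_state *s)
{
	/* `s->ssid && s->ssid_len` would be equivalent: the address of
	 * an embedded array member is always non-NULL, and compilers
	 * warn about such tests under -Waddress. */
	return s->ssid_len != 0;
}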
diff --git a/drivers/net/wireless/ti/wl18xx/cmd.c b/drivers/net/wireless/ti/wl18xx/cmd.c
index a8d176ddc..63e95ba74 100644
--- a/drivers/net/wireless/ti/wl18xx/cmd.c
+++ b/drivers/net/wireless/ti/wl18xx/cmd.c
@@ -48,10 +48,10 @@ int wl18xx_cmd_channel_switch(struct wl1271 *wl,
cmd->stop_tx = ch_switch->block_tx;
switch (ch_switch->chandef.chan->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
cmd->band = WLCORE_BAND_5GHZ;
break;
default:
@@ -187,7 +187,7 @@ int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start)
cmd->role_id = wlvif->role_id;
cmd->channel = wlvif->channel;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (wlvif->band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->bandwidth = wlcore_get_native_channel_type(wlvif->channel_type);
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index ff6e46dd6..ef811848d 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -64,13 +64,13 @@ static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel,
u8 sync_band)
{
struct sk_buff *skb;
- enum ieee80211_band band;
+ enum nl80211_band band;
int freq;
if (sync_band == WLCORE_BAND_5GHZ)
- band = IEEE80211_BAND_5GHZ;
+ band = NL80211_BAND_5GHZ;
else
- band = IEEE80211_BAND_2GHZ;
+ band = NL80211_BAND_2GHZ;
freq = ieee80211_channel_to_frequency(sync_channel, band);
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index be21289c3..fc02c27bc 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -137,8 +137,8 @@ static const u8 wl18xx_rate_to_idx_5ghz[] = {
};
static const u8 *wl18xx_band_rate_to_idx[] = {
- [IEEE80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
- [IEEE80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
+ [NL80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
+ [NL80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
};
enum wl18xx_hw_rates {
@@ -1302,12 +1302,12 @@ static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
/* sanity check - we don't support this */
- if (WARN_ON(wlvif->band != IEEE80211_BAND_5GHZ))
+ if (WARN_ON(wlvif->band != NL80211_BAND_5GHZ))
return 0;
return CONF_TX_RATE_USE_WIDE_CHAN;
} else if (wl18xx_is_mimo_supported(wl) &&
- wlvif->band == IEEE80211_BAND_2GHZ) {
+ wlvif->band == NL80211_BAND_2GHZ) {
wl1271_debug(DEBUG_ACX, "using MIMO rate mask");
/*
* we don't care about HT channel here - if a peer doesn't
@@ -1996,24 +1996,24 @@ static int wl18xx_setup(struct wl1271 *wl)
* siso40.
*/
if (wl18xx_is_mimo_supported(wl))
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_mimo_ht_cap_2ghz);
else
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_siso40_ht_cap_2ghz);
/* 5GHz is always wide */
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
&wl18xx_siso40_ht_cap_5ghz);
} else if (priv->conf.ht.mode == HT_MODE_WIDE) {
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_siso40_ht_cap_2ghz);
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
&wl18xx_siso40_ht_cap_5ghz);
} else if (priv->conf.ht.mode == HT_MODE_SISO20) {
- wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_siso20_ht_cap);
- wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ,
+ wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
&wl18xx_siso20_ht_cap);
}
diff --git a/drivers/net/wireless/ti/wl18xx/scan.c b/drivers/net/wireless/ti/wl18xx/scan.c
index bc15aa2c3..4e5221544 100644
--- a/drivers/net/wireless/ti/wl18xx/scan.c
+++ b/drivers/net/wireless/ti/wl18xx/scan.c
@@ -110,7 +110,7 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
/* TODO: per-band ies? */
if (cmd->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
@@ -127,7 +127,7 @@ static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
if (cmd->active[1] || cmd->dfs) {
- u8 band = IEEE80211_BAND_5GHZ;
+ u8 band = NL80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
@@ -253,7 +253,7 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
cmd->terminate_on_report = 0;
if (cmd->active[0]) {
- u8 band = IEEE80211_BAND_2GHZ;
+ u8 band = NL80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
@@ -270,7 +270,7 @@ int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
}
if (cmd->active[1] || cmd->dfs) {
- u8 band = IEEE80211_BAND_5GHZ;
+ u8 band = NL80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 3406ffb53..ebaf66ef3 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -43,7 +43,7 @@ void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
rate->idx = fw_rate;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
rate->idx -= CONF_HW_RATE_INDEX_6MBPS;
rate->flags = 0;
} else {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index f01d24baf..33153565a 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -423,7 +423,7 @@ EXPORT_SYMBOL_GPL(wlcore_get_native_channel_type);
static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
- enum ieee80211_band band,
+ enum nl80211_band band,
int channel)
{
struct wl12xx_cmd_role_start *cmd;
@@ -438,7 +438,7 @@ static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
cmd->role_id = wlvif->dev_role_id;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->channel = channel;
@@ -524,7 +524,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
cmd->role_id = wlvif->role_id;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (wlvif->band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->channel = wlvif->channel;
cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
@@ -693,10 +693,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
cmd->ap.local_rates = cpu_to_le32(supported_rates);
switch (wlvif->band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
cmd->band = WLCORE_BAND_5GHZ;
break;
default:
@@ -773,7 +773,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
cmd->role_id = wlvif->role_id;
- if (wlvif->band == IEEE80211_BAND_5GHZ)
+ if (wlvif->band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->channel = wlvif->channel;
cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
@@ -1164,7 +1164,7 @@ int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
- if (band == IEEE80211_BAND_2GHZ)
+ if (band == NL80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, role_id,
template_id_2_4,
skb->data, skb->len, 0, rate);
@@ -1195,7 +1195,7 @@ struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
wl1271_debug(DEBUG_SCAN, "set ap probe request template");
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
- if (wlvif->band == IEEE80211_BAND_2GHZ)
+ if (wlvif->band == NL80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
@@ -1628,19 +1628,19 @@ out:
return ret;
}
-static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
+static int wlcore_get_reg_conf_ch_idx(enum nl80211_band band, u16 ch)
{
/*
* map the given band/channel to the respective predefined
* bit expected by the fw
*/
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
/* channels 1..14 are mapped to 0..13 */
if (ch >= 1 && ch <= 14)
return ch - 1;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
switch (ch) {
case 8 ... 16:
/* channels 8,12,16 are mapped to 18,19,20 */
@@ -1670,7 +1670,7 @@ static int wlcore_get_reg_conf_ch_idx(enum ieee80211_band band, u16 ch)
}
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
- enum ieee80211_band band)
+ enum nl80211_band band)
{
int ch_bit_idx = 0;
@@ -1699,7 +1699,7 @@ int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
memset(tmp_ch_bitmap, 0, sizeof(tmp_ch_bitmap));
- for (b = IEEE80211_BAND_2GHZ; b <= IEEE80211_BAND_5GHZ; b++) {
+ for (b = NL80211_BAND_2GHZ; b <= NL80211_BAND_5GHZ; b++) {
band = wiphy->bands[b];
for (i = 0; i < band->n_channels; i++) {
struct ieee80211_channel *channel = &band->channels[i];
@@ -1851,7 +1851,7 @@ out:
}
static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- u8 role_id, enum ieee80211_band band, u8 channel)
+ u8 role_id, enum nl80211_band band, u8 channel)
{
struct wl12xx_cmd_roc *cmd;
int ret = 0;
@@ -1870,10 +1870,10 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cmd->role_id = role_id;
cmd->channel = channel;
switch (band) {
- case IEEE80211_BAND_2GHZ:
+ case NL80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
- case IEEE80211_BAND_5GHZ:
+ case NL80211_BAND_5GHZ:
cmd->band = WLCORE_BAND_5GHZ;
break;
default:
@@ -1925,7 +1925,7 @@ out:
}
int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
- enum ieee80211_band band, u8 channel)
+ enum nl80211_band band, u8 channel)
{
int ret = 0;
@@ -1995,7 +1995,7 @@ out:
/* start dev role and roc on its channel */
int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum ieee80211_band band, int channel)
+ enum nl80211_band band, int channel)
{
int ret;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index e28e2f230..52c3b4860 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -40,7 +40,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
- enum ieee80211_band band, int channel);
+ enum nl80211_band band, int channel);
int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
@@ -83,14 +83,14 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid);
int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
- enum ieee80211_band band, u8 channel);
+ enum nl80211_band band, u8 channel);
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta, u8 hlid);
int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid);
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
- enum ieee80211_band band);
+ enum nl80211_band band);
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 feature, u8 enable, u8 value);
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 564ca750c..1cc6d5ab0 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -175,14 +175,25 @@ int wlcore_set_partition(struct wl1271 *wl,
if (ret < 0)
goto out;
- /* We don't need the size of the last partition, as it is
- * automatically calculated based on the total memory size and
- * the sizes of the previous partitions.
+ /* wl12xx only: We don't need the size of the last partition,
+ * as it is automatically calculated based on the total memory
+ * size and the sizes of the previous partitions.
+ *
+ * wl18xx re-defines the HW_PART3 addresses for logger-over-SDIO
+ * support, while wl12xx expects the write to HW_PART3_START_ADDR
+ * at offset 24, which creates a conflict between the two layouts.
+ * To resolve it, the expected value is written to
+ * HW_PART3_SIZE_ADDR instead, which sits at offset 24 after the
+ * re-definition.
*/
ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
if (ret < 0)
goto out;
+ ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
+ if (ret < 0)
+ goto out;
+
out:
return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 10cf37476..704ce6467 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -36,7 +36,8 @@
#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
-#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
+#define HW_PART3_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
+#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 28)
#define HW_ACCESS_REGISTER_SIZE 4
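Taken together, the io.c and io.h hunks above move HW_PART3_SIZE_ADDR to offset 24 and HW_PART3_START_ADDR to offset 28. A hedged sketch of the resulting tail of wlcore_set_partition(), showing why the wl12xx_ptable mem3 entry earlier in this diff swaps .start and .size:

/* Sketch only; mirrors the write order of the patched function.
 *   HW_PART3_START_ADDR = base + 28 (used by wl18xx logger-over-SDIO)
 *   HW_PART3_SIZE_ADDR  = base + 24 (what wl12xx firmware reads)
 */
static int write_part3(struct wl1271 *wl,
		       const struct wlcore_partition_set *p)
{
	int ret;

	ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
	if (ret < 0)
		return ret;

	/* wl12xx expects its legacy value at offset 24, which is why
	 * the wl12xx_ptable mem3 hunk carries it in .size now. */
	return wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
}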
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index 1edf6eed7..2509c0050 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -243,7 +243,7 @@ static void wl12xx_tx_watchdog_work(struct work_struct *work)
struct delayed_work *dwork;
struct wl1271 *wl;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, tx_watchdog_work);
mutex_lock(&wl->mutex);
@@ -1930,7 +1930,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
wlcore_enable_interrupts(wl);
- wl->band = IEEE80211_BAND_2GHZ;
+ wl->band = NL80211_BAND_2GHZ;
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
@@ -2011,7 +2011,7 @@ static void wlcore_channel_switch_work(struct work_struct *work)
struct wl12xx_vif *wlvif;
int ret;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
wl = wlvif->wl;
@@ -2047,7 +2047,7 @@ static void wlcore_connection_loss_work(struct work_struct *work)
struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
wl = wlvif->wl;
@@ -2076,7 +2076,7 @@ static void wlcore_pending_auth_complete_work(struct work_struct *work)
unsigned long time_spare;
int ret;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wlvif = container_of(dwork, struct wl12xx_vif,
pending_auth_complete_work);
wl = wlvif->wl;
@@ -2240,8 +2240,8 @@ static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
wlvif->rate_set = CONF_TX_ENABLED_RATES;
}
- wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+ wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+ wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
/*
@@ -2330,7 +2330,7 @@ power_off:
* 11a channels if not supported
*/
if (!wl->enable_11a)
- wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
+ wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
wl->enable_11a ? "" : "not ");
@@ -5588,7 +5588,7 @@ static void wlcore_roc_complete_work(struct work_struct *work)
struct wl1271 *wl;
int ret;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, roc_complete_work);
ret = wlcore_roc_completed(wl);
@@ -5871,7 +5871,7 @@ static const struct ieee80211_ops wl1271_ops = {
};
-u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
{
u8 idx;
@@ -6096,21 +6096,21 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
* We keep local copies of the band structs because we need to
* modify them on a per-device basis.
*/
- memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
+ memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
sizeof(wl1271_band_2ghz));
- memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
- &wl->ht_cap[IEEE80211_BAND_2GHZ],
+ memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
+ &wl->ht_cap[NL80211_BAND_2GHZ],
sizeof(*wl->ht_cap));
- memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
+ memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
sizeof(wl1271_band_5ghz));
- memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
- &wl->ht_cap[IEEE80211_BAND_5GHZ],
+ memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
+ &wl->ht_cap[NL80211_BAND_5GHZ],
sizeof(*wl->ht_cap));
- wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &wl->bands[IEEE80211_BAND_2GHZ];
- wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &wl->bands[IEEE80211_BAND_5GHZ];
+ wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
+ &wl->bands[NL80211_BAND_2GHZ];
+ wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
+ &wl->bands[NL80211_BAND_5GHZ];
/*
* allow 4 queues per mac address we support +
@@ -6205,7 +6205,7 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
wl->channel = 0;
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->band = IEEE80211_BAND_2GHZ;
+ wl->band = NL80211_BAND_2GHZ;
wl->channel_type = NL80211_CHAN_NO_HT;
wl->flags = 0;
wl->sg_enabled = true;
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 4cd316e61..b36133b73 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -38,7 +38,7 @@ void wl1271_elp_work(struct work_struct *work)
struct wl12xx_vif *wlvif;
int ret;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, elp_work);
wl1271_debug(DEBUG_PSM, "elp work");
@@ -202,7 +202,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
* enable beacon early termination.
* Not relevant for 5GHz and for high rates.
*/
- if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+ if ((wlvif->band == NL80211_BAND_2GHZ) &&
(wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
ret = wl1271_acx_bet_enable(wl, wlvif, true);
if (ret < 0)
@@ -213,7 +213,7 @@ int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
- if ((wlvif->band == IEEE80211_BAND_2GHZ) &&
+ if ((wlvif->band == NL80211_BAND_2GHZ) &&
(wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
ret = wl1271_acx_bet_enable(wl, wlvif, false);
if (ret < 0)
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 34e7e938e..c9bd294a0 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -64,9 +64,9 @@ static void wl1271_rx_status(struct wl1271 *wl,
memset(status, 0, sizeof(struct ieee80211_rx_status));
if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
- status->band = IEEE80211_BAND_2GHZ;
+ status->band = NL80211_BAND_2GHZ;
else
- status->band = IEEE80211_BAND_5GHZ;
+ status->band = NL80211_BAND_5GHZ;
status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band);
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index f5a7087cf..57c056563 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -146,7 +146,7 @@ struct wl1271_rx_descriptor {
} __packed;
int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
-u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
+u8 wl1271_rate_to_idx(int rate, enum nl80211_band band);
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
struct wl12xx_rx_filter *filter);
diff --git a/drivers/net/wireless/ti/wlcore/scan.c b/drivers/net/wireless/ti/wlcore/scan.c
index 1e3d51cd6..233436432 100644
--- a/drivers/net/wireless/ti/wlcore/scan.c
+++ b/drivers/net/wireless/ti/wlcore/scan.c
@@ -38,7 +38,7 @@ void wl1271_scan_complete_work(struct work_struct *work)
struct wl12xx_vif *wlvif;
int ret;
- dwork = container_of(work, struct delayed_work, work);
+ dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, scan_complete_work);
wl1271_debug(DEBUG_SCAN, "Scanning complete");
@@ -164,7 +164,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
u32 delta_per_probe;
- if (band == IEEE80211_BAND_5GHZ)
+ if (band == NL80211_BAND_5GHZ)
delta_per_probe = c->dwell_time_delta_per_probe_5;
else
delta_per_probe = c->dwell_time_delta_per_probe;
@@ -215,7 +215,7 @@ wlcore_scan_get_channels(struct wl1271 *wl,
channels[j].channel = req_channels[i]->hw_value;
if (n_pactive_ch &&
- (band == IEEE80211_BAND_2GHZ) &&
+ (band == NL80211_BAND_2GHZ) &&
(channels[j].channel >= 12) &&
(channels[j].channel <= 14) &&
(flags & IEEE80211_CHAN_NO_IR) &&
@@ -266,7 +266,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
n_channels,
n_ssids,
cfg->channels_2,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
false, true, 0,
MAX_CHANNELS_2GHZ,
&n_pactive_ch,
@@ -277,7 +277,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
n_channels,
n_ssids,
cfg->channels_2,
- IEEE80211_BAND_2GHZ,
+ NL80211_BAND_2GHZ,
false, false,
cfg->passive[0],
MAX_CHANNELS_2GHZ,
@@ -289,7 +289,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
n_channels,
n_ssids,
cfg->channels_5,
- IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ,
false, true, 0,
wl->max_channels_5,
&n_pactive_ch,
@@ -300,7 +300,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
n_channels,
n_ssids,
cfg->channels_5,
- IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ,
true, true,
cfg->passive[1],
wl->max_channels_5,
@@ -312,7 +312,7 @@ wlcore_set_scan_chan_params(struct wl1271 *wl,
n_channels,
n_ssids,
cfg->channels_5,
- IEEE80211_BAND_5GHZ,
+ NL80211_BAND_5GHZ,
false, false,
cfg->passive[1] + cfg->dfs,
wl->max_channels_5,
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 020ac1a4b..cea9443c2 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -382,7 +382,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
ret = of_property_read_u32(dt_node, "ref-clock-frequency",
&pdev_data->ref_clock_freq);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(glue->dev,
"can't get reference clock frequency (%d)\n", ret);
return ret;
@@ -425,7 +425,7 @@ static int wl1271_probe(struct spi_device *spi)
}
ret = wlcore_probe_of(spi, glue, &pdev_data);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(glue->dev,
"can't get device tree parameters (%d)\n", ret);
return ret;
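The IS_ERR_VALUE() removals above rest on of_property_read_u32() returning 0 or a negative errno in a plain int, whereas IS_ERR_VALUE() is intended for unsigned long values carrying encoded error pointers. A minimal sketch of the idiomatic check, reusing the property name from the hunk:

#include <linux/of.h>

static int read_ref_clock_freq(struct device_node *np, u32 *freq)
{
	int ret = of_property_read_u32(np, "ref-clock-frequency", freq);

	if (ret)	/* e.g. -EINVAL, -ENODATA, -EOVERFLOW */
		return ret;

	return 0;
}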
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index f0ac36139..c1b8e4e9d 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -453,7 +453,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
}
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
- enum ieee80211_band rate_band)
+ enum nl80211_band rate_band)
{
struct ieee80211_supported_band *band;
u32 enabled_rates = 0;
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 79cb3ff8b..e2ba62d92 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -246,9 +246,9 @@ int wlcore_tx_complete(struct wl1271 *wl);
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_tx_reset(struct wl1271 *wl);
void wl1271_tx_flush(struct wl1271 *wl);
-u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band);
+u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
- enum ieee80211_band rate_band);
+ enum nl80211_band rate_band);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, struct ieee80211_sta *sta);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 72c31a8ed..8f28aa022 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -342,7 +342,7 @@ struct wl1271 {
struct wl12xx_vif *sched_vif;
/* The current band */
- enum ieee80211_band band;
+ enum nl80211_band band;
struct completion *elp_compl;
struct delayed_work elp_work;
@@ -517,7 +517,7 @@ void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct wl1271_station *wl_sta, bool in_conn);
static inline void
-wlcore_set_ht_cap(struct wl1271 *wl, enum ieee80211_band band,
+wlcore_set_ht_cap(struct wl1271 *wl, enum nl80211_band band,
struct ieee80211_sta_ht_cap *ht_cap)
{
memcpy(&wl->ht_cap[band], ht_cap, sizeof(*ht_cap));
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index 23507ceb4..7d43b8057 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -392,7 +392,7 @@ struct wl12xx_vif {
u8 ssid_len;
/* The current band */
- enum ieee80211_band band;
+ enum nl80211_band band;
int channel;
enum nl80211_channel_type channel_type;
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index d5c371d77..13fd734b6 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1287,7 +1287,7 @@ static void wl3501_tx_timeout(struct net_device *dev)
printk(KERN_ERR "%s: Error %d resetting card on Tx timeout!\n",
dev->name, rc);
else {
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -1454,7 +1454,7 @@ static int wl3501_get_freq(struct net_device *dev, struct iw_request_info *info,
struct wl3501_card *this = netdev_priv(dev);
wrqu->freq.m = 100000 *
- ieee80211_channel_to_frequency(this->chan, IEEE80211_BAND_2GHZ);
+ ieee80211_channel_to_frequency(this->chan, NL80211_BAND_2GHZ);
wrqu->freq.e = 1;
return 0;
}
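netif_trans_update(), used here and in the zd1201 hunk below, replaced direct writes to dev->trans_start when the transmit timestamp moved into struct netdev_queue; its 4.7 definition is roughly:

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}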
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 79d6f4b88..799f70f2d 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -844,7 +844,7 @@ static void zd1201_tx_timeout(struct net_device *dev)
usb_unlink_urb(zd->tx_urb);
dev->stats.tx_errors++;
/* Restart the timeout to quiet the watchdog: */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
static int zd1201_set_mac_address(struct net_device *dev, void *p)
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index e539d9b1b..3e37a045f 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -1068,7 +1068,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
}
stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
- stats.band = IEEE80211_BAND_2GHZ;
+ stats.band = NL80211_BAND_2GHZ;
stats.signal = zd_check_signal(hw, status->signal_strength);
rate = zd_rx_rate(buffer, status);
@@ -1395,7 +1395,7 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
mac->band.n_channels = ARRAY_SIZE(zd_channels);
mac->band.channels = mac->channels;
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &mac->band;
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = &mac->band;
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
diff --git a/drivers/net/xen-netback/Makefile b/drivers/net/xen-netback/Makefile
index e346e8125..11e02be9d 100644
--- a/drivers/net/xen-netback/Makefile
+++ b/drivers/net/xen-netback/Makefile
@@ -1,3 +1,3 @@
obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
-xen-netback-y := netback.o xenbus.o interface.o
+xen-netback-y := netback.o xenbus.o interface.o hash.o
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index f44b38846..84d6cbdd1 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -220,6 +220,35 @@ struct xenvif_mcast_addr {
#define XEN_NETBK_MCAST_MAX 64
+#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
+#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
+#define XEN_NETBK_HASH_TAG_SIZE 40
+
+struct xenvif_hash_cache_entry {
+ struct list_head link;
+ struct rcu_head rcu;
+ u8 tag[XEN_NETBK_HASH_TAG_SIZE];
+ unsigned int len;
+ u32 val;
+ int seq;
+};
+
+struct xenvif_hash_cache {
+ spinlock_t lock;
+ struct list_head list;
+ unsigned int count;
+ atomic_t seq;
+};
+
+struct xenvif_hash {
+ unsigned int alg;
+ u32 flags;
+ u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
+ u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
+ unsigned int size;
+ struct xenvif_hash_cache cache;
+};
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -251,6 +280,8 @@ struct xenvif {
unsigned int num_queues; /* active queues, resource allocated */
unsigned int stalled_queues;
+ struct xenvif_hash hash;
+
struct xenbus_watch credit_watch;
struct xenbus_watch mcast_ctrl_watch;
@@ -260,6 +291,11 @@ struct xenvif {
struct dentry *xenvif_dbg_root;
#endif
+ struct xen_netif_ctrl_back_ring ctrl;
+ struct task_struct *ctrl_task;
+ wait_queue_head_t ctrl_wq;
+ unsigned int ctrl_irq;
+
/* Miscellaneous private stuff. */
struct net_device *dev;
};
@@ -285,10 +321,15 @@ struct xenvif *xenvif_alloc(struct device *parent,
int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);
-int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
- unsigned long rx_ring_ref, unsigned int tx_evtchn,
- unsigned int rx_evtchn);
-void xenvif_disconnect(struct xenvif *vif);
+int xenvif_connect_data(struct xenvif_queue *queue,
+ unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref,
+ unsigned int tx_evtchn,
+ unsigned int rx_evtchn);
+void xenvif_disconnect_data(struct xenvif *vif);
+int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+ unsigned int evtchn);
+void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);
int xenvif_xenbus_init(void);
@@ -300,10 +341,10 @@ int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);
/* (Un)Map communication rings. */
-void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
-int xenvif_map_frontend_rings(struct xenvif_queue *queue,
- grant_ref_t tx_ring_ref,
- grant_ref_t rx_ring_ref);
+void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
+int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
@@ -318,6 +359,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
int xenvif_dealloc_kthread(void *data);
+int xenvif_ctrl_kthread(void *data);
+
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
@@ -341,6 +384,7 @@ extern bool separate_tx_rx_irq;
extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
+extern unsigned int xenvif_hash_cache_size;
#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
@@ -354,4 +398,18 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);
+/* Hash */
+void xenvif_init_hash(struct xenvif *vif);
+void xenvif_deinit_hash(struct xenvif *vif);
+
+u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
+u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
+u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
+u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
+u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
+u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
+ u32 off);
+
+void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
+
#endif /* __XEN_NETBACK__COMMON_H__ */
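The new xenvif_hash_cache above pairs an RCU-protected list with a global sequence counter: every hit or insertion re-stamps an entry, and once the cache overflows the entry with the smallest stamp is evicted (see xenvif_add_hash() in the new hash.c below). A small user-space restatement of that least-recently-used scan, with illustrative names:

#include <stddef.h>

struct cache_entry {
	int seq;			/* stamped from a global counter */
	struct cache_entry *next;
};

/* The entry with the smallest seq is the least recently used,
 * mirroring the oldest-selection loop in xenvif_add_hash(). */
static struct cache_entry *find_eviction_victim(struct cache_entry *head)
{
	struct cache_entry *e, *oldest = NULL;

	for (e = head; e != NULL; e = e->next)
		if (oldest == NULL || e->seq < oldest->seq)
			oldest = e;

	return oldest;
}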
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
new file mode 100644
index 000000000..fb87cb39a
--- /dev/null
+++ b/drivers/net/xen-netback/hash.c
@@ -0,0 +1,384 @@
+/*
+ * Copyright (c) 2016 Citrix Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#define XEN_NETIF_DEFINE_TOEPLITZ
+
+#include "common.h"
+#include <linux/vmalloc.h>
+#include <linux/rculist.h>
+
+static void xenvif_del_hash(struct rcu_head *rcu)
+{
+ struct xenvif_hash_cache_entry *entry;
+
+ entry = container_of(rcu, struct xenvif_hash_cache_entry, rcu);
+
+ kfree(entry);
+}
+
+static void xenvif_add_hash(struct xenvif *vif, const u8 *tag,
+ unsigned int len, u32 val)
+{
+ struct xenvif_hash_cache_entry *new, *entry, *oldest;
+ unsigned long flags;
+ bool found;
+
+ new = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!new)
+ return;
+
+ memcpy(new->tag, tag, len);
+ new->len = len;
+ new->val = val;
+
+ spin_lock_irqsave(&vif->hash.cache.lock, flags);
+
+ found = false;
+ oldest = NULL;
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ /* Make sure we don't add duplicate entries */
+ if (entry->len == len &&
+ memcmp(entry->tag, tag, len) == 0)
+ found = true;
+ if (!oldest || entry->seq < oldest->seq)
+ oldest = entry;
+ }
+
+ if (!found) {
+ new->seq = atomic_inc_return(&vif->hash.cache.seq);
+ list_add_rcu(&new->link, &vif->hash.cache.list);
+
+ if (++vif->hash.cache.count > xenvif_hash_cache_size) {
+ list_del_rcu(&oldest->link);
+ vif->hash.cache.count--;
+ call_rcu(&oldest->rcu, xenvif_del_hash);
+ }
+ }
+
+ spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
+
+ if (found)
+ kfree(new);
+}
+
+static u32 xenvif_new_hash(struct xenvif *vif, const u8 *data,
+ unsigned int len)
+{
+ u32 val;
+
+ val = xen_netif_toeplitz_hash(vif->hash.key,
+ sizeof(vif->hash.key),
+ data, len);
+
+ if (xenvif_hash_cache_size != 0)
+ xenvif_add_hash(vif, data, len, val);
+
+ return val;
+}
+
+static void xenvif_flush_hash(struct xenvif *vif)
+{
+ struct xenvif_hash_cache_entry *entry;
+ unsigned long flags;
+
+ if (xenvif_hash_cache_size == 0)
+ return;
+
+ spin_lock_irqsave(&vif->hash.cache.lock, flags);
+
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ list_del_rcu(&entry->link);
+ vif->hash.cache.count--;
+ call_rcu(&entry->rcu, xenvif_del_hash);
+ }
+
+ spin_unlock_irqrestore(&vif->hash.cache.lock, flags);
+}
+
+static u32 xenvif_find_hash(struct xenvif *vif, const u8 *data,
+ unsigned int len)
+{
+ struct xenvif_hash_cache_entry *entry;
+ u32 val;
+ bool found;
+
+ if (len >= XEN_NETBK_HASH_TAG_SIZE)
+ return 0;
+
+ if (xenvif_hash_cache_size == 0)
+ return xenvif_new_hash(vif, data, len);
+
+ rcu_read_lock();
+
+ found = false;
+
+ list_for_each_entry_rcu(entry, &vif->hash.cache.list, link) {
+ if (entry->len == len &&
+ memcmp(entry->tag, data, len) == 0) {
+ val = entry->val;
+ entry->seq = atomic_inc_return(&vif->hash.cache.seq);
+ found = true;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (!found)
+ val = xenvif_new_hash(vif, data, len);
+
+ return val;
+}
+
+void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb)
+{
+ struct flow_keys flow;
+ u32 hash = 0;
+ enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
+ u32 flags = vif->hash.flags;
+ bool has_tcp_hdr;
+
+ /* Quick rejection test: If the network protocol doesn't
+ * correspond to any enabled hash type then there's no point
+ * in parsing the packet header.
+ */
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV4))
+ break;
+
+ goto done;
+
+ case htons(ETH_P_IPV6):
+ if (flags & (XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV6))
+ break;
+
+ goto done;
+
+ default:
+ goto done;
+ }
+
+ memset(&flow, 0, sizeof(flow));
+ if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
+ goto done;
+
+ has_tcp_hdr = (flow.basic.ip_proto == IPPROTO_TCP) &&
+ !(flow.control.flags & FLOW_DIS_IS_FRAGMENT);
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (has_tcp_hdr &&
+ (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)) {
+ u8 data[12];
+
+ memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
+ memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
+ memcpy(&data[8], &flow.ports.src, 2);
+ memcpy(&data[10], &flow.ports.dst, 2);
+
+ hash = xenvif_find_hash(vif, data, sizeof(data));
+ type = PKT_HASH_TYPE_L4;
+ } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) {
+ u8 data[8];
+
+ memcpy(&data[0], &flow.addrs.v4addrs.src, 4);
+ memcpy(&data[4], &flow.addrs.v4addrs.dst, 4);
+
+ hash = xenvif_find_hash(vif, data, sizeof(data));
+ type = PKT_HASH_TYPE_L3;
+ }
+
+ break;
+
+ case htons(ETH_P_IPV6):
+ if (has_tcp_hdr &&
+ (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)) {
+ u8 data[36];
+
+ memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
+ memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
+ memcpy(&data[32], &flow.ports.src, 2);
+ memcpy(&data[34], &flow.ports.dst, 2);
+
+ hash = xenvif_find_hash(vif, data, sizeof(data));
+ type = PKT_HASH_TYPE_L4;
+ } else if (flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) {
+ u8 data[32];
+
+ memcpy(&data[0], &flow.addrs.v6addrs.src, 16);
+ memcpy(&data[16], &flow.addrs.v6addrs.dst, 16);
+
+ hash = xenvif_find_hash(vif, data, sizeof(data));
+ type = PKT_HASH_TYPE_L3;
+ }
+
+ break;
+ }
+
+done:
+ if (type == PKT_HASH_TYPE_NONE)
+ skb_clear_hash(skb);
+ else
+ __skb_set_sw_hash(skb, hash, type == PKT_HASH_TYPE_L4);
+}
+
+u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg)
+{
+ switch (alg) {
+ case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
+ case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
+ break;
+
+ default:
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+ }
+
+ vif->hash.alg = alg;
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags)
+{
+ if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+ return XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
+
+ *flags = XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags)
+{
+ if (flags & ~(XEN_NETIF_CTRL_HASH_TYPE_IPV4 |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV6 |
+ XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP))
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ vif->hash.flags = flags;
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len)
+{
+ u8 *key = vif->hash.key;
+ struct gnttab_copy copy_op = {
+ .source.u.ref = gref,
+ .source.domid = vif->domid,
+ .dest.u.gmfn = virt_to_gfn(key),
+ .dest.domid = DOMID_SELF,
+ .dest.offset = xen_offset_in_page(key),
+ .len = len,
+ .flags = GNTCOPY_source_gref
+ };
+
+ if (len > XEN_NETBK_MAX_HASH_KEY_SIZE)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ if (copy_op.len != 0) {
+ gnttab_batch_copy(&copy_op, 1);
+
+ if (copy_op.status != GNTST_okay)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+ }
+
+ /* Clear any remaining key octets */
+ if (len < XEN_NETBK_MAX_HASH_KEY_SIZE)
+ memset(key + len, 0, XEN_NETBK_MAX_HASH_KEY_SIZE - len);
+
+ xenvif_flush_hash(vif);
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
+{
+ if (size > XEN_NETBK_MAX_HASH_MAPPING_SIZE)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ vif->hash.size = size;
+ memset(vif->hash.mapping, 0, sizeof(u32) * size);
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
+ u32 off)
+{
+ u32 *mapping = &vif->hash.mapping[off];
+ struct gnttab_copy copy_op = {
+ .source.u.ref = gref,
+ .source.domid = vif->domid,
+ .dest.u.gmfn = virt_to_gfn(mapping),
+ .dest.domid = DOMID_SELF,
+ .dest.offset = xen_offset_in_page(mapping),
+ .len = len * sizeof(u32),
+ .flags = GNTCOPY_source_gref
+ };
+
+ if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ while (len-- != 0)
+ if (mapping[off++] >= vif->num_queues)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+
+ if (copy_op.len != 0) {
+ gnttab_batch_copy(&copy_op, 1);
+
+ if (copy_op.status != GNTST_okay)
+ return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+ }
+
+ return XEN_NETIF_CTRL_STATUS_SUCCESS;
+}
+
+void xenvif_init_hash(struct xenvif *vif)
+{
+ if (xenvif_hash_cache_size == 0)
+ return;
+
+ spin_lock_init(&vif->hash.cache.lock);
+ INIT_LIST_HEAD(&vif->hash.cache.list);
+}
+
+void xenvif_deinit_hash(struct xenvif *vif)
+{
+ xenvif_flush_hash(vif);
+}
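xen_netif_toeplitz_hash(), pulled in via XEN_NETIF_DEFINE_TOEPLITZ and called from xenvif_new_hash() above, computes the standard Toeplitz hash over a sliding 32-bit window of the key. A self-contained user-space sketch of the same algorithm, offered for illustration rather than as the canonical implementation:

#include <stdint.h>
#include <stddef.h>

/* For every 1-bit of input, XOR in the current 32-bit key window,
 * then slide the window left by one key bit. Assumes the key is at
 * least four bytes longer than the data, as RSS keys are. */
static uint32_t toeplitz_hash(const uint8_t *key, size_t keylen,
			      const uint8_t *data, size_t datalen)
{
	uint32_t window = 0, hash = 0;
	size_t i, j;

	for (i = 0; i < 4 && i < keylen; i++)
		window |= (uint32_t)key[i] << (24 - 8 * i);

	for (i = 0; i < datalen; i++) {
		uint8_t byte = data[i];

		for (j = 0; j < 8; j++) {
			if (byte & 0x80)
				hash ^= window;
			byte <<= 1;

			window <<= 1;
			if (i + 4 < keylen && (key[i + 4] & (0x80 >> j)))
				window |= 1;
		}
	}

	return hash;
}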
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f5231a2dd..83deeebfc 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -128,6 +128,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+irqreturn_t xenvif_ctrl_interrupt(int irq, void *dev_id)
+{
+ struct xenvif *vif = dev_id;
+
+ wake_up(&vif->ctrl_wq);
+
+ return IRQ_HANDLED;
+}
+
int xenvif_queue_stopped(struct xenvif_queue *queue)
{
struct net_device *dev = queue->vif->dev;
@@ -142,6 +151,33 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}
+static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv,
+ select_queue_fallback_t fallback)
+{
+ struct xenvif *vif = netdev_priv(dev);
+ unsigned int size = vif->hash.size;
+
+ if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
+ u16 index = fallback(dev, skb) % dev->real_num_tx_queues;
+
+ /* Make sure there is no hash information in the socket
+ * buffer otherwise it would be incorrectly forwarded
+ * to the frontend.
+ */
+ skb_clear_hash(skb);
+
+ return index;
+ }
+
+ xenvif_set_skb_hash(vif, skb);
+
+ if (size == 0)
+ return skb_get_hash_raw(skb) % dev->real_num_tx_queues;
+
+ return vif->hash.mapping[skb_get_hash_raw(skb) % size];
+}
+
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
@@ -386,6 +422,7 @@ static const struct ethtool_ops xenvif_ethtool_ops = {
};
static const struct net_device_ops xenvif_netdev_ops = {
+ .ndo_select_queue = xenvif_select_queue,
.ndo_start_xmit = xenvif_start_xmit,
.ndo_get_stats = xenvif_get_stats,
.ndo_open = xenvif_open,
@@ -527,9 +564,69 @@ void xenvif_carrier_on(struct xenvif *vif)
rtnl_unlock();
}
-int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
- unsigned long rx_ring_ref, unsigned int tx_evtchn,
- unsigned int rx_evtchn)
+int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
+ unsigned int evtchn)
+{
+ struct net_device *dev = vif->dev;
+ void *addr;
+ struct xen_netif_ctrl_sring *shared;
+ struct task_struct *task;
+ int err = -ENOMEM;
+
+ err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
+ &ring_ref, 1, &addr);
+ if (err)
+ goto err;
+
+ shared = (struct xen_netif_ctrl_sring *)addr;
+ BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);
+
+ init_waitqueue_head(&vif->ctrl_wq);
+
+ err = bind_interdomain_evtchn_to_irqhandler(vif->domid, evtchn,
+ xenvif_ctrl_interrupt,
+ 0, dev->name, vif);
+ if (err < 0)
+ goto err_unmap;
+
+ vif->ctrl_irq = err;
+
+ xenvif_init_hash(vif);
+
+ task = kthread_create(xenvif_ctrl_kthread, (void *)vif,
+ "%s-control", dev->name);
+ if (IS_ERR(task)) {
+ pr_warn("Could not allocate kthread for %s\n", dev->name);
+ err = PTR_ERR(task);
+ goto err_deinit;
+ }
+
+ get_task_struct(task);
+ vif->ctrl_task = task;
+
+ wake_up_process(vif->ctrl_task);
+
+ return 0;
+
+err_deinit:
+ xenvif_deinit_hash(vif);
+ unbind_from_irqhandler(vif->ctrl_irq, vif);
+ vif->ctrl_irq = 0;
+
+err_unmap:
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+ vif->ctrl.sring);
+ vif->ctrl.sring = NULL;
+
+err:
+ return err;
+}
+
+int xenvif_connect_data(struct xenvif_queue *queue,
+ unsigned long tx_ring_ref,
+ unsigned long rx_ring_ref,
+ unsigned int tx_evtchn,
+ unsigned int rx_evtchn)
{
struct task_struct *task;
int err = -ENOMEM;
@@ -538,7 +635,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
BUG_ON(queue->task);
BUG_ON(queue->dealloc_task);
- err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref);
+ err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
+ rx_ring_ref);
if (err < 0)
goto err;
@@ -614,7 +712,7 @@ err_tx_unbind:
unbind_from_irqhandler(queue->tx_irq, queue);
queue->tx_irq = 0;
err_unmap:
- xenvif_unmap_frontend_rings(queue);
+ xenvif_unmap_frontend_data_rings(queue);
netif_napi_del(&queue->napi);
err:
module_put(THIS_MODULE);
@@ -634,7 +732,7 @@ void xenvif_carrier_off(struct xenvif *vif)
rtnl_unlock();
}
-void xenvif_disconnect(struct xenvif *vif)
+void xenvif_disconnect_data(struct xenvif *vif)
{
struct xenvif_queue *queue = NULL;
unsigned int num_queues = vif->num_queues;
@@ -668,12 +766,33 @@ void xenvif_disconnect(struct xenvif *vif)
queue->tx_irq = 0;
}
- xenvif_unmap_frontend_rings(queue);
+ xenvif_unmap_frontend_data_rings(queue);
}
xenvif_mcast_addr_list_free(vif);
}
+void xenvif_disconnect_ctrl(struct xenvif *vif)
+{
+ if (vif->ctrl_task) {
+ kthread_stop(vif->ctrl_task);
+ put_task_struct(vif->ctrl_task);
+ vif->ctrl_task = NULL;
+ }
+
+ if (vif->ctrl_irq) {
+ xenvif_deinit_hash(vif);
+ unbind_from_irqhandler(vif->ctrl_irq, vif);
+ vif->ctrl_irq = 0;
+ }
+
+ if (vif->ctrl.sring) {
+ xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
+ vif->ctrl.sring);
+ vif->ctrl.sring = NULL;
+ }
+}
+
/* Reverse the relevant parts of xenvif_init_queue().
* Used for queue teardown from xenvif_free(), and on the
* error handling paths in xenbus.c:connect().
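Once the hash has been (re)computed, xenvif_select_queue() above reduces to a simple rule: modulo over the real transmit queues when no mapping table is configured, otherwise an indirection through the frontend-provided table. A hedged standalone restatement:

#include <stdint.h>

static unsigned int pick_queue(uint32_t raw_hash, const uint32_t *mapping,
			       unsigned int mapping_size,
			       unsigned int num_tx_queues)
{
	/* No mapping table configured: spread by hash directly. */
	if (mapping_size == 0)
		return raw_hash % num_tx_queues;

	/* Otherwise the table performs the hash-to-queue indirection. */
	return mapping[raw_hash % mapping_size];
}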
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4412a57ec..edbae0b1e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -89,6 +89,11 @@ module_param(fatal_skb_slots, uint, 0444);
*/
#define XEN_NETBACK_TX_COPY_LEN 128
+/* This is the maximum number of flows in the hash cache. */
+#define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
+unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
+module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
+MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
u8 status);
@@ -163,6 +168,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
if (skb_is_gso(skb))
needed++;
+ if (skb->sw_hash)
+ needed++;
do {
prod = queue->rx.sring->req_prod;
@@ -280,6 +287,8 @@ struct gop_frag_copy {
struct xenvif_rx_meta *meta;
int head;
int gso_type;
+ int protocol;
+ int hash_present;
struct page *page;
};
@@ -326,8 +335,15 @@ static void xenvif_setup_copy_gop(unsigned long gfn,
npo->copy_off += *len;
info->meta->size += *len;
+ if (!info->head)
+ return;
+
/* Leave a gap for the GSO descriptor. */
- if (info->head && ((1 << info->gso_type) & queue->vif->gso_mask))
+ if ((1 << info->gso_type) & queue->vif->gso_mask)
+ queue->rx.req_cons++;
+
+ /* Leave a gap for the hash extra segment. */
+ if (info->hash_present)
queue->rx.req_cons++;
info->head = 0; /* There must be something in this buffer now */
@@ -362,6 +378,11 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
.npo = npo,
.head = *head,
.gso_type = XEN_NETIF_GSO_TYPE_NONE,
+ /* xenvif_set_skb_hash() will have either set a s/w
+ * hash or cleared the hash depending on
+ * whether the frontend wants a hash for this skb.
+ */
+ .hash_present = skb->sw_hash,
};
unsigned long bytes;
@@ -550,6 +571,7 @@ void xenvif_kick_thread(struct xenvif_queue *queue)
static void xenvif_rx_action(struct xenvif_queue *queue)
{
+ struct xenvif *vif = queue->vif;
s8 status;
u16 flags;
struct xen_netif_rx_response *resp;
@@ -585,9 +607,10 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
+ struct xen_netif_extra_info *extra = NULL;
if ((1 << queue->meta[npo.meta_cons].gso_type) &
- queue->vif->gso_prefix_mask) {
+ vif->gso_prefix_mask) {
resp = RING_GET_RESPONSE(&queue->rx,
queue->rx.rsp_prod_pvt++);
@@ -605,7 +628,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
queue->stats.tx_bytes += skb->len;
queue->stats.tx_packets++;
- status = xenvif_check_gop(queue->vif,
+ status = xenvif_check_gop(vif,
XENVIF_RX_CB(skb)->meta_slots_used,
&npo);
@@ -627,21 +650,57 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
flags);
if ((1 << queue->meta[npo.meta_cons].gso_type) &
- queue->vif->gso_mask) {
- struct xen_netif_extra_info *gso =
- (struct xen_netif_extra_info *)
+ vif->gso_mask) {
+ extra = (struct xen_netif_extra_info *)
RING_GET_RESPONSE(&queue->rx,
queue->rx.rsp_prod_pvt++);
resp->flags |= XEN_NETRXF_extra_info;
- gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
- gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
- gso->u.gso.pad = 0;
- gso->u.gso.features = 0;
+ extra->u.gso.type = queue->meta[npo.meta_cons].gso_type;
+ extra->u.gso.size = queue->meta[npo.meta_cons].gso_size;
+ extra->u.gso.pad = 0;
+ extra->u.gso.features = 0;
- gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
- gso->flags = 0;
+ extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
+ extra->flags = 0;
+ }
+
+ if (skb->sw_hash) {
+ /* Since the skb got here via xenvif_select_queue()
+ * we know that the hash has been re-calculated
+ * according to a configuration set by the frontend
+ * and therefore we know that it is legitimate to
+ * pass it to the frontend.
+ */
+ if (resp->flags & XEN_NETRXF_extra_info)
+ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
+ else
+ resp->flags |= XEN_NETRXF_extra_info;
+
+ extra = (struct xen_netif_extra_info *)
+ RING_GET_RESPONSE(&queue->rx,
+ queue->rx.rsp_prod_pvt++);
+
+ extra->u.hash.algorithm =
+ XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;
+
+ if (skb->l4_hash)
+ extra->u.hash.type =
+ skb->protocol == htons(ETH_P_IP) ?
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
+ else
+ extra->u.hash.type =
+ skb->protocol == htons(ETH_P_IP) ?
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
+ _XEN_NETIF_CTRL_HASH_TYPE_IPV6;
+
+ *(uint32_t *)extra->u.hash.value =
+ skb_get_hash_raw(skb);
+
+ extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
+ extra->flags = 0;
}
xenvif_add_frag_responses(queue, status,
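Note the chaining scheme in this hunk: if a GSO extra was already queued, its XEN_NETIF_EXTRA_FLAG_MORE bit is set before the hash extra is appended; otherwise the hash extra comes first and XEN_NETRXF_extra_info is raised on the response itself. A hedged sketch of how a frontend walks such a chain is below; the loop shape and the cons index are illustrative, and the real xen-netfront consumer differs in detail:

/* Frontend-side sketch: extra-info segments follow the response
 * slot and are chained by XEN_NETIF_EXTRA_FLAG_MORE.
 */
RING_IDX cons = queue->rx.rsp_cons;	/* assumed consumer index */
struct xen_netif_rx_response *rsp =
	RING_GET_RESPONSE(&queue->rx, cons++);

if (rsp->flags & XEN_NETRXF_extra_info) {
	struct xen_netif_extra_info *extra;

	do {
		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&queue->rx, cons++);

		if (extra->type == XEN_NETIF_EXTRA_TYPE_GSO)
			; /* consume extra->u.gso.{type,size} */
		else if (extra->type == XEN_NETIF_EXTRA_TYPE_HASH)
			; /* consume extra->u.hash.{algorithm,type,value} */
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
}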
@@ -1451,6 +1510,33 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
}
}
+ if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
+ struct xen_netif_extra_info *extra;
+ enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
+
+ extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
+
+ switch (extra->u.hash.type) {
+ case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
+ case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
+ type = PKT_HASH_TYPE_L3;
+ break;
+
+ case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
+ case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
+ type = PKT_HASH_TYPE_L4;
+ break;
+
+ default:
+ break;
+ }
+
+ if (type != PKT_HASH_TYPE_NONE)
+ skb_set_hash(skb,
+ *(u32 *)extra->u.hash.value,
+ type);
+ }
+
XENVIF_TX_CB(skb)->pending_idx = pending_idx;
__skb_put(skb, data_len);
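The mapping above matters because PKT_HASH_TYPE_L4 promises that the hash covers ports while PKT_HASH_TYPE_L3 does not, and because a stored software hash short-circuits later flow dissection. A small illustration of what the skb_set_hash() call buys (the hash value here is made up):

/* After skb_set_hash(), skb->sw_hash and (for L4) skb->l4_hash are
 * set, so skb_get_hash() returns the stored value rather than
 * re-dissecting the packet headers.
 */
skb_set_hash(skb, 0x12345678, PKT_HASH_TYPE_L4);
u32 h = skb_get_hash(skb);	/* 0x12345678, no recomputation */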
@@ -1926,7 +2012,7 @@ static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
return queue->dealloc_cons != queue->dealloc_prod;
}
-void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
+void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
if (queue->tx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
@@ -1936,9 +2022,9 @@ void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
queue->rx.sring);
}
-int xenvif_map_frontend_rings(struct xenvif_queue *queue,
- grant_ref_t tx_ring_ref,
- grant_ref_t rx_ring_ref)
+int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref)
{
void *addr;
struct xen_netif_tx_sring *txs;
@@ -1965,7 +2051,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
return 0;
err:
- xenvif_unmap_frontend_rings(queue);
+ xenvif_unmap_frontend_data_rings(queue);
return err;
}
@@ -2164,6 +2250,135 @@ int xenvif_dealloc_kthread(void *data)
return 0;
}
+static void make_ctrl_response(struct xenvif *vif,
+ const struct xen_netif_ctrl_request *req,
+ u32 status, u32 data)
+{
+ RING_IDX idx = vif->ctrl.rsp_prod_pvt;
+ struct xen_netif_ctrl_response rsp = {
+ .id = req->id,
+ .type = req->type,
+ .status = status,
+ .data = data,
+ };
+
+ *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
+ vif->ctrl.rsp_prod_pvt = ++idx;
+}
+
+static void push_ctrl_response(struct xenvif *vif)
+{
+ int notify;
+
+ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
+ if (notify)
+ notify_remote_via_irq(vif->ctrl_irq);
+}
+
+static void process_ctrl_request(struct xenvif *vif,
+ const struct xen_netif_ctrl_request *req)
+{
+ u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
+ u32 data = 0;
+
+ switch (req->type) {
+ case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
+ status = xenvif_set_hash_alg(vif, req->data[0]);
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
+ status = xenvif_get_hash_flags(vif, &data);
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
+ status = xenvif_set_hash_flags(vif, req->data[0]);
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
+ status = xenvif_set_hash_key(vif, req->data[0],
+ req->data[1]);
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
+ status = XEN_NETIF_CTRL_STATUS_SUCCESS;
+ data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
+ status = xenvif_set_hash_mapping_size(vif,
+ req->data[0]);
+ break;
+
+ case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
+ status = xenvif_set_hash_mapping(vif, req->data[0],
+ req->data[1],
+ req->data[2]);
+ break;
+
+ default:
+ break;
+ }
+
+ make_ctrl_response(vif, req, status, data);
+ push_ctrl_response(vif);
+}
+
+static void xenvif_ctrl_action(struct xenvif *vif)
+{
+ for (;;) {
+ RING_IDX req_prod, req_cons;
+
+ req_prod = vif->ctrl.sring->req_prod;
+ req_cons = vif->ctrl.req_cons;
+
+ /* Make sure we can see requests before we process them. */
+ rmb();
+
+ if (req_cons == req_prod)
+ break;
+
+ while (req_cons != req_prod) {
+ struct xen_netif_ctrl_request req;
+
+ RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
+ req_cons++;
+
+ process_ctrl_request(vif, &req);
+ }
+
+ vif->ctrl.req_cons = req_cons;
+ vif->ctrl.sring->req_event = req_cons + 1;
+ }
+}
+
+static bool xenvif_ctrl_work_todo(struct xenvif *vif)
+{
+ if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
+ return true;
+
+ return false;
+}
+
+int xenvif_ctrl_kthread(void *data)
+{
+ struct xenvif *vif = data;
+
+ for (;;) {
+ wait_event_interruptible(vif->ctrl_wq,
+ xenvif_ctrl_work_todo(vif) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ while (xenvif_ctrl_work_todo(vif))
+ xenvif_ctrl_action(vif);
+
+ cond_resched();
+ }
+
+ return 0;
+}
+
static int __init netback_init(void)
{
int rc = 0;
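The control-plane additions above follow a simple request/response discipline: one xen_netif_ctrl_request is consumed at a time, process_ctrl_request() maps it onto the xenvif_*_hash_* setters, and every request gets exactly one response before the producer index is pushed. For orientation, here is a hedged sketch of what the matching frontend side might look like when selecting Toeplitz hashing; the xennet_* names and np fields are assumptions, while the ring macros and xen_netif_ctrl_* constants come from the protocol headers:

/* Hypothetical frontend-side sketch: queue one control request and
 * kick the backend over the control event channel.
 */
static int xennet_ctrl_set_toeplitz(struct netfront_info *np)
{
	struct xen_netif_ctrl_request *req;
	int notify;

	req = RING_GET_REQUEST(&np->ctrl, np->ctrl.req_prod_pvt++);
	req->id = np->ctrl_id++;
	req->type = XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM;
	req->data[0] = XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->ctrl, notify);
	if (notify)
		notify_remote_via_evtchn(np->ctrl_evtchn);

	/* Matching rsp->id against req->id and checking rsp->status
	 * happens asynchronously and is omitted from this sketch.
	 */
	return 0;
}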
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index bd182cd55..6a31f2610 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -38,7 +38,8 @@ struct backend_info {
const char *hotplug_script;
};
-static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
+static int connect_data_rings(struct backend_info *be,
+ struct xenvif_queue *queue);
static void connect(struct backend_info *be);
static int read_xenbus_vif_flags(struct backend_info *be);
static int backend_create_xenvif(struct backend_info *be);
@@ -367,6 +368,12 @@ static int netback_probe(struct xenbus_device *dev,
if (err)
pr_debug("Error writing multi-queue-max-queues\n");
+ err = xenbus_printf(XBT_NIL, dev->nodename,
+ "feature-ctrl-ring",
+ "%u", true);
+ if (err)
+ pr_debug("Error writing feature-ctrl-ring\n");
+
script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
if (IS_ERR(script)) {
err = PTR_ERR(script);
@@ -457,7 +464,8 @@ static void backend_disconnect(struct backend_info *be)
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(be->vif);
#endif /* CONFIG_DEBUG_FS */
- xenvif_disconnect(be->vif);
+ xenvif_disconnect_data(be->vif);
+ xenvif_disconnect_ctrl(be->vif);
}
}
@@ -825,6 +833,48 @@ static void hotplug_status_changed(struct xenbus_watch *watch,
kfree(str);
}
+static int connect_ctrl_ring(struct backend_info *be)
+{
+ struct xenbus_device *dev = be->dev;
+ struct xenvif *vif = be->vif;
+ unsigned int val;
+ grant_ref_t ring_ref;
+ unsigned int evtchn;
+ int err;
+
+ err = xenbus_gather(XBT_NIL, dev->otherend,
+ "ctrl-ring-ref", "%u", &val, NULL);
+ if (err)
+ goto done; /* The frontend does not have a control ring */
+
+ ring_ref = val;
+
+ err = xenbus_gather(XBT_NIL, dev->otherend,
+ "event-channel-ctrl", "%u", &val, NULL);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "reading %s/event-channel-ctrl",
+ dev->otherend);
+ goto fail;
+ }
+
+ evtchn = val;
+
+ err = xenvif_connect_ctrl(vif, ring_ref, evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "mapping shared-frame %u port %u",
+ ring_ref, evtchn);
+ goto fail;
+ }
+
+done:
+ return 0;
+
+fail:
+ return err;
+}
+
static void connect(struct backend_info *be)
{
int err;
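The control ring is negotiated entirely through xenstore: netback_probe() advertises feature-ctrl-ring (earlier in this patch), and a frontend that opts in publishes ctrl-ring-ref and event-channel-ctrl, which connect_ctrl_ring() gathers above. A hedged sketch of the frontend half of that handshake; the variable names (ctrl_ring_ref, ctrl_evtchn) are illustrative:

/* Frontend-side sketch: publish the granted control page and its
 * event channel so the backend can map them.
 */
err = xenbus_printf(XBT_NIL, dev->nodename,
		    "ctrl-ring-ref", "%u", ctrl_ring_ref);
if (!err)
	err = xenbus_printf(XBT_NIL, dev->nodename,
			    "event-channel-ctrl", "%u", ctrl_evtchn);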
@@ -861,6 +911,12 @@ static void connect(struct backend_info *be)
xen_register_watchers(dev, be->vif);
read_xenbus_vif_flags(be);
+ err = connect_ctrl_ring(be);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "connecting control ring");
+ return;
+ }
+
/* Use the number of queues requested by the frontend */
be->vif->queues = vzalloc(requested_num_queues *
sizeof(struct xenvif_queue));
@@ -896,11 +952,12 @@ static void connect(struct backend_info *be)
queue->remaining_credit = credit_bytes;
queue->credit_usec = credit_usec;
- err = connect_rings(be, queue);
+ err = connect_data_rings(be, queue);
if (err) {
- /* connect_rings() cleans up after itself on failure,
- * but we need to clean up after xenvif_init_queue() here,
- * and also clean up any previously initialised queues.
+ /* connect_data_rings() cleans up after itself on
+ * failure, but we need to clean up after
+ * xenvif_init_queue() here, and also clean up any
+ * previously initialised queues.
*/
xenvif_deinit_queue(queue);
be->vif->num_queues = queue_index;
@@ -935,15 +992,17 @@ static void connect(struct backend_info *be)
err:
if (be->vif->num_queues > 0)
- xenvif_disconnect(be->vif); /* Clean up existing queues */
+ xenvif_disconnect_data(be->vif); /* Clean up existing queues */
vfree(be->vif->queues);
be->vif->queues = NULL;
be->vif->num_queues = 0;
+ xenvif_disconnect_ctrl(be->vif);
return;
}
-static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
+static int connect_data_rings(struct backend_info *be,
+ struct xenvif_queue *queue)
{
struct xenbus_device *dev = be->dev;
unsigned int num_queues = queue->vif->num_queues;
@@ -1007,8 +1066,8 @@ static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
}
/* Map the shared frame, irq etc. */
- err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref,
- tx_evtchn, rx_evtchn);
+ err = xenvif_connect_data(queue, tx_ring_ref, rx_ring_ref,
+ tx_evtchn, rx_evtchn);
if (err) {
xenbus_dev_fatal(dev, err,
"mapping shared-frames %lu/%lu port tx %u rx %u",